diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..505d7c377b880966b2015b2601b987fa57bdfe2a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,16 @@
+# Vim swap files
+*~
+*.swp
+*.swo
+
+# vscode
+.vscode
+
+# rust binaries
+bin/duniter
+neon/native/index.node
+target
+
+# files generated by rust tests
+test2.bin.gz
+**/*.wot
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1f1717cf41ee3d204670f842f05e91deb6531321
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,47 @@
+stages:
+  - tests
+  - quality
+
+.env: &env
+  image: registry.duniter.org/docker/rust/rust-x64-stable-ci:latest
+  tags:
+    - redshift
+  before_script:
+    - export PATH="$HOME/.cargo/bin:$PATH"
+    - rustup show && rustc --version && cargo --version
+
+tests:
+  <<: *env
+  rules:
+    - if: $CI_COMMIT_REF_NAME =~ /^wip/
+      when: manual
+    - if: $CI_COMMIT_TAG
+      when: never
+    - if: '$CI_MERGE_REQUEST_ID || $CI_COMMIT_BRANCH == "main" || $CI_COMMIT_BRANCH =~ /^release/'
+    - when: manual
+  stage: tests
+  script:
+    - cargo test --all --all-features
+  coverage: '/<coverage>(\d+.\d+\%)<\/coverage>/'
+
+fmt_and_clippy:
+  extends: .env
+  rules:
+    - if: $CI_COMMIT_TAG
+      when: never
+    - if: $CI_MERGE_REQUEST_ID
+    - when: on_success
+  stage: quality
+  script:
+    - cargo fmt -- --version
+    - cargo fmt -- --check
+    - cargo clippy -- -V
+    - cargo clippy --all --tests -- -D warnings --verbose
+
+audit_dependencies:
+  extends: .env
+  before_script:
+    - cargo deny -V
+  stage: quality
+  script:
+    - cargo deny --workspace check
diff --git a/.rusty-hook.toml b/.rusty-hook.toml
new file mode 100644
index 0000000000000000000000000000000000000000..36953ed9a603654f09f73df4e934ad96a81fa2ac
--- /dev/null
+++ b/.rusty-hook.toml
@@ -0,0 +1,5 @@
+[hooks]
+pre-commit = "cargo fmt -- --check"
+
+[logging]
+verbose = true
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..60337f19f07141c1b7e5278c3b64a9dd43775a3f
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,94 @@
+# Contributing
+
+Before adding a new feature or modifying an existing one, please discuss it first in an issue on this repository and tag one or more maintainers.
+
+## Commit Message Guidelines
+
+We have precise rules for how our git commit messages must be formatted. This leads to **more
+readable messages** that are easy to follow when looking through the **project history**.
+
+### Commit Message Format
+
+Each commit message consists of a **header**, a **body** and a **footer**.  The header has a special
+format that includes a **type**, a **scope** and a **subject**:
+
+```txt
+<type>(<scope>): <subject>
+<BLANK LINE>
+<body>
+<BLANK LINE>
+<footer>
+```
+
+The **header** is mandatory and the **scope** of the header is optional.
+
+No line of the commit message may be longer than 100 characters! This keeps the message easier
+to read on GitHub as well as in various git tools.
+
+The footer should contain a [closing reference to an issue](https://help.github.com/articles/closing-issues-via-commit-messages/) if any.
+
+```txt
+docs(changelog): update changelog to beta.5
+```
+
+```txt
+fix(release): need to depend on latest rxjs and zone.js
+
+The version in our package.json gets copied to the one we publish, and users need the latest of these.
+```
+
+### Revert
+
+If the commit reverts a previous commit, it should begin with `revert: `, followed by the header of the reverted commit. In the body it should say: `This reverts commit <hash>.`, where the hash is the SHA of the commit being reverted.
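+
+For example, a revert commit could look like this (the header and hash below are purely illustrative):
+
+```txt
+revert: fix(crypto): handle empty signature
+
+This reverts commit a1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6e7f8a9b0.
+```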
+
+### Type
+
+Must be one of the following:
+
+* **build**: Changes that affect the build system or external dependencies (example scopes: crypto, wot)
+* **chore**: Modification of the repository architecture
+* **ci**: Changes to our CI configuration files and scripts (example scopes: GitHub Actions, GitLab CI)
+* **docs**: Documentation only changes
+* **feat**: Add a new feature
+* **mod**: Modify an existing feature
+* **fix**: A bug fix
+* **perf**: A code change that improves performance
+* **refactor**: A code change that neither fixes a bug, adds a feature, nor modifies an existing feature
+* **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc)
+* **test**: Adding missing tests or correcting existing tests
+
+### Subject
+
+The subject contains a succinct description of the change:
+
+* use the imperative, present tense: "change" not "changed" nor "changes"
+* don't capitalize the first letter
+* no dot (.) at the end
+
+### Body
+
+Just as in the **subject**, use the imperative, present tense: "change" not "changed" nor "changes".
+The body should include the motivation for the change and contrast this with previous behavior.
+
+### Footer
+
+The footer should contain any information about **Breaking Changes** and is also the place to
+reference issues that this commit **Closes**.
+
+**Breaking Changes** must start with the phrase `BREAKING CHANGE:` followed by a space or two newlines; the rest of the commit message then describes the change.
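+
+For example, a footer announcing a breaking change and closing an issue could look like this (the description and issue number are purely illustrative):
+
+```txt
+BREAKING CHANGE: the configuration file format has changed, existing conf files must be regenerated.
+
+Closes #123
+```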
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000000000000000000000000000000000000..81c9e87cd062ecbc2adcf2eff6bd07ff6d5b4520
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,2341 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "aho-corasick"
+version = "0.7.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.40"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "28b2cd92db5cbd74e8e5028f7e27dd7aa3090e89e4f2a197cc7c8dfb69c7063b"
+
+[[package]]
+name = "arrayref"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544"
+
+[[package]]
+name = "arrayvec"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
+
+[[package]]
+name = "async-attributes"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5"
+dependencies = [
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "async-channel"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319"
+dependencies = [
+ "concurrent-queue",
+ "event-listener",
+ "futures-core",
+]
+
+[[package]]
+name = "async-executor"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb877970c7b440ead138f6321a3b5395d6061183af779340b65e20c0fede9146"
+dependencies = [
+ "async-task",
+ "concurrent-queue",
+ "fastrand",
+ "futures-lite",
+ "once_cell",
+ "vec-arena",
+]
+
+[[package]]
+name = "async-global-executor"
+version = "2.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6"
+dependencies = [
+ "async-channel",
+ "async-executor",
+ "async-io",
+ "async-mutex",
+ "blocking",
+ "futures-lite",
+ "num_cpus",
+ "once_cell",
+]
+
+[[package]]
+name = "async-io"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcb9af4888a70ad78ecb5efcb0ba95d66a3cf54a88b62ae81559954c7588c7a2"
+dependencies = [
+ "concurrent-queue",
+ "fastrand",
+ "futures-lite",
+ "libc",
+ "log",
+ "once_cell",
+ "parking",
+ "polling",
+ "socket2",
+ "vec-arena",
+ "waker-fn",
+ "winapi",
+]
+
+[[package]]
+name = "async-lock"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6a8ea61bf9947a1007c5cada31e647dbc77b103c679858150003ba697ea798b"
+dependencies = [
+ "event-listener",
+]
+
+[[package]]
+name = "async-mutex"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e"
+dependencies = [
+ "event-listener",
+]
+
+[[package]]
+name = "async-oneshot"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50f4770cbbff928c30a991de67fb3976f44d8e3e202f8c79ef91b47006e04904"
+dependencies = [
+ "futures-micro",
+]
+
+[[package]]
+name = "async-rwlock"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "261803dcc39ba9e72760ba6e16d0199b1eef9fc44e81bffabbebb9f5aea3906c"
+dependencies = [
+ "async-mutex",
+ "event-listener",
+]
+
+[[package]]
+name = "async-std"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341"
+dependencies = [
+ "async-attributes",
+ "async-channel",
+ "async-global-executor",
+ "async-io",
+ "async-lock",
+ "crossbeam-utils",
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-lite",
+ "gloo-timers",
+ "kv-log-macro",
+ "log",
+ "memchr",
+ "num_cpus",
+ "once_cell",
+ "pin-project-lite",
+ "pin-utils",
+ "slab",
+ "wasm-bindgen-futures",
+]
+
+[[package]]
+name = "async-task"
+version = "4.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0"
+
+[[package]]
+name = "async-trait"
+version = "0.1.50"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b98e84bbb4cbcdd97da190ba0c58a1bb0de2c1fdf67d159e192ed766aeca722"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "atomic-waker"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a"
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+
+[[package]]
+name = "base64"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
+
+[[package]]
+name = "beef"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6736e2428df2ca2848d846c43e88745121a6654696e349ce0054a420815a7409"
+
+[[package]]
+name = "bincode"
+version = "1.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "bitflags"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"
+
+[[package]]
+name = "bitflags"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+
+[[package]]
+name = "blake3"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e9ff35b701f3914bdb8fad3368d822c766ef2858b2583198e41639b936f09d3f"
+dependencies = [
+ "arrayref",
+ "arrayvec",
+ "cc",
+ "cfg-if 0.1.10",
+ "constant_time_eq",
+ "crypto-mac",
+ "digest 0.9.0",
+]
+
+[[package]]
+name = "block-buffer"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b"
+dependencies = [
+ "block-padding",
+ "byte-tools",
+ "byteorder",
+ "generic-array 0.12.4",
+]
+
+[[package]]
+name = "block-padding"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5"
+dependencies = [
+ "byte-tools",
+]
+
+[[package]]
+name = "blocking"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c5e170dbede1f740736619b776d7251cb1b9095c435c34d8ca9f57fcd2f335e9"
+dependencies = [
+ "async-channel",
+ "async-task",
+ "atomic-waker",
+ "fastrand",
+ "futures-lite",
+ "once_cell",
+]
+
+[[package]]
+name = "bs58"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3"
+
+[[package]]
+name = "bstr"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a40b47ad93e1a5404e6c18dec46b628214fee441c70f4ab5d6942142cc268a3d"
+dependencies = [
+ "lazy_static",
+ "memchr",
+ "regex-automata",
+ "serde",
+]
+
+[[package]]
+name = "bumpalo"
+version = "3.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe"
+
+[[package]]
+name = "byte-tools"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7"
+
+[[package]]
+name = "byteorder"
+version = "1.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
+
+[[package]]
+name = "bytes"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040"
+
+[[package]]
+name = "cache-padded"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba"
+
+[[package]]
+name = "cast"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc38c385bfd7e444464011bb24820f40dd1c76bcdfa1b78611cb7c2e5cafab75"
+dependencies = [
+ "rustc_version",
+]
+
+[[package]]
+name = "cc"
+version = "1.0.67"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd"
+dependencies = [
+ "jobserver",
+]
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "chrono"
+version = "0.4.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
+dependencies = [
+ "libc",
+ "num-integer",
+ "num-traits",
+ "time",
+ "winapi",
+]
+
+[[package]]
+name = "ci_info"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24f638c70e8c5753795cc9a8c07c44da91554a09e4cf11a7326e8161b0a3c45e"
+dependencies = [
+ "envmnt",
+]
+
+[[package]]
+name = "clap"
+version = "2.33.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
+dependencies = [
+ "bitflags 1.2.1",
+ "textwrap",
+ "unicode-width",
+]
+
+[[package]]
+name = "cmake"
+version = "0.1.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb6210b637171dfba4cda12e579ac6dc73f5165ad56133e5d72ef3131f320855"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "concurrent-queue"
+version = "1.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3"
+dependencies = [
+ "cache-padded",
+]
+
+[[package]]
+name = "constant_time_eq"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc"
+
+[[package]]
+name = "crc32fast"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
+dependencies = [
+ "cfg-if 1.0.0",
+]
+
+[[package]]
+name = "criterion"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab327ed7354547cc2ef43cbe20ef68b988e70b4b593cbd66a2a61733123a3d23"
+dependencies = [
+ "atty",
+ "cast",
+ "clap",
+ "criterion-plot",
+ "csv",
+ "itertools 0.10.0",
+ "lazy_static",
+ "num-traits",
+ "oorandom",
+ "plotters",
+ "rayon",
+ "regex",
+ "serde",
+ "serde_cbor",
+ "serde_derive",
+ "serde_json",
+ "tinytemplate",
+ "walkdir",
+]
+
+[[package]]
+name = "criterion-plot"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e022feadec601fba1649cfa83586381a4ad31c6bf3a9ab7d408118b05dd9889d"
+dependencies = [
+ "cast",
+ "itertools 0.9.0",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4"
+dependencies = [
+ "cfg-if 1.0.0",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9"
+dependencies = [
+ "cfg-if 1.0.0",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12"
+dependencies = [
+ "cfg-if 1.0.0",
+ "crossbeam-utils",
+ "lazy_static",
+ "memoffset",
+ "scopeguard",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49"
+dependencies = [
+ "autocfg",
+ "cfg-if 1.0.0",
+ "lazy_static",
+]
+
+[[package]]
+name = "crypto-mac"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab"
+dependencies = [
+ "generic-array 0.14.4",
+ "subtle",
+]
+
+[[package]]
+name = "cryptoxide"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46212f5d1792f89c3e866fb10636139464060110c568edd7f73ab5e9f736c26d"
+
+[[package]]
+name = "csv"
+version = "1.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1"
+dependencies = [
+ "bstr",
+ "csv-core",
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "csv-core"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "ctor"
+version = "0.1.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e98e2ad1a782e33928b96fc3948e7c355e5af34ba4de7670fe8bac2a3b2006d"
+dependencies = [
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "difference"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
+
+[[package]]
+name = "digest"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5"
+dependencies = [
+ "generic-array 0.12.4",
+]
+
+[[package]]
+name = "digest"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
+dependencies = [
+ "generic-array 0.14.4",
+]
+
+[[package]]
+name = "downcast"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4bb454f0228b18c7f4c3b0ebbee346ed9c52e7443b0999cd543ff3571205701d"
+
+[[package]]
+name = "dubp"
+version = "0.51.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "914faa8052c72c8b2f513a44398123379d70a59dfedf0aa8dc7b581ee223fbfc"
+dependencies = [
+ "dubp-block",
+ "dubp-common",
+ "dubp-documents",
+ "dubp-documents-parser",
+ "dubp-wallet",
+ "dup-crypto",
+]
+
+[[package]]
+name = "dubp-block"
+version = "0.51.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b15cc90473a86c4987ea34211829d491dfb56f7c09ba79ac3d57d9430782d038"
+dependencies = [
+ "dubp-documents",
+ "dubp-documents-parser",
+ "json-pest-parser",
+ "log",
+ "serde",
+ "serde_json",
+ "thiserror",
+]
+
+[[package]]
+name = "dubp-common"
+version = "0.51.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b3a5a6cc11940e0a85f492325fec45c557c5f103c92ea445427b4272c1a12395"
+dependencies = [
+ "dup-crypto",
+ "serde",
+ "serde_json",
+ "thiserror",
+ "zerocopy",
+]
+
+[[package]]
+name = "dubp-documents"
+version = "0.51.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85d43233426a5a24a5d22e98da2d8f0efab9739a58af15fa27e74a213b2d5bb9"
+dependencies = [
+ "beef",
+ "dubp-wallet",
+ "log",
+ "serde",
+ "serde_json",
+ "thiserror",
+]
+
+[[package]]
+name = "dubp-documents-parser"
+version = "0.51.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac382364d99af3c235530f9de41a1833d18a16dff8833a7b351e8946d378de18"
+dependencies = [
+ "dubp-documents",
+ "json-pest-parser",
+ "pest",
+ "pest_derive",
+ "serde_json",
+ "thiserror",
+]
+
+[[package]]
+name = "dubp-wallet"
+version = "0.51.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "47cc059e6b139def809f9d0bf776a21f5c2d59fefc20ed30c7aceedfef8de703"
+dependencies = [
+ "byteorder",
+ "dubp-common",
+ "serde",
+ "smallvec",
+ "thiserror",
+ "zerocopy",
+]
+
+[[package]]
+name = "dubp-wot"
+version = "0.11.0"
+dependencies = [
+ "bincode",
+ "log",
+ "rayon",
+ "serde",
+]
+
+[[package]]
+name = "duniter-bc-reader"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "dubp",
+ "duniter-dbs",
+ "resiter",
+ "smallvec",
+]
+
+[[package]]
+name = "duniter-conf"
+version = "0.1.0"
+dependencies = [
+ "dubp",
+ "serde",
+]
+
+[[package]]
+name = "duniter-core"
+version = "1.8.1"
+dependencies = [
+ "duniter-bc-reader",
+ "duniter-conf",
+ "duniter-dbs",
+ "duniter-dbs-write-ops",
+ "duniter-global",
+ "duniter-mempools",
+ "duniter-module",
+ "rusty-hook",
+]
+
+[[package]]
+name = "duniter-dbs"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "arrayvec",
+ "bincode",
+ "byteorder",
+ "chrono",
+ "dubp",
+ "kv_typed",
+ "log",
+ "mockall",
+ "parking_lot",
+ "paste",
+ "rand 0.7.3",
+ "serde",
+ "serde_json",
+ "smallvec",
+ "tempfile",
+ "thiserror",
+ "uninit",
+ "unwrap",
+ "zerocopy",
+]
+
+[[package]]
+name = "duniter-dbs-write-ops"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "chrono",
+ "dubp",
+ "duniter-dbs",
+ "duniter-global",
+ "fast-threadpool",
+ "flume",
+ "log",
+ "maplit",
+ "resiter",
+ "serde_json",
+]
+
+[[package]]
+name = "duniter-global"
+version = "1.8.1"
+dependencies = [
+ "async-rwlock",
+ "dubp",
+ "duniter-dbs",
+ "flume",
+ "mockall",
+ "once_cell",
+ "tokio",
+]
+
+[[package]]
+name = "duniter-mempools"
+version = "0.1.0"
+dependencies = [
+ "dubp",
+ "duniter-bc-reader",
+ "duniter-dbs",
+ "duniter-dbs-write-ops",
+ "log",
+ "thiserror",
+]
+
+[[package]]
+name = "duniter-module"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "dubp",
+ "duniter-conf",
+ "duniter-dbs",
+ "duniter-global",
+ "duniter-mempools",
+ "fast-threadpool",
+ "log",
+ "paste",
+ "tokio",
+]
+
+[[package]]
+name = "dup-crypto"
+version = "0.51.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c3b8d3e1c65e3ed89db6973e807e9c355c8f9078866402e695a16683f1e226d2"
+dependencies = [
+ "base64",
+ "blake3",
+ "bs58",
+ "byteorder",
+ "cryptoxide",
+ "getrandom 0.2.2",
+ "ring",
+ "serde",
+ "thiserror",
+ "zerocopy",
+ "zeroize",
+]
+
+[[package]]
+name = "either"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
+
+[[package]]
+name = "envmnt"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2d328fc287c61314c4a61af7cfdcbd7e678e39778488c7cb13ec133ce0f4059"
+dependencies = [
+ "fsio",
+ "indexmap",
+]
+
+[[package]]
+name = "event-listener"
+version = "2.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59"
+
+[[package]]
+name = "fake-simd"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"
+
+[[package]]
+name = "fast-threadpool"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0585e8f3a04d8c4a65927a5cb5e42c6ce641528b4fc294af9d7990fcd6c4b86a"
+dependencies = [
+ "async-oneshot",
+ "flume",
+ "num_cpus",
+]
+
+[[package]]
+name = "fastrand"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca5faf057445ce5c9d4329e382b2ce7ca38550ef3b73a5348362d5f24e0c7fe3"
+dependencies = [
+ "instant",
+]
+
+[[package]]
+name = "float-cmp"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1267f4ac4f343772758f7b1bdcbe767c218bbab93bb432acbf5162bbf85a6c4"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "flume"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "11fce69af4d4582ea989e6adfc5c9b81fd2071ff89234e5c14675c82a85217df"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+ "nanorand",
+ "pin-project",
+ "spinning_top",
+]
+
+[[package]]
+name = "fragile"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69a039c3498dc930fe810151a34ba0c1c70b02b8625035592e74432f678591f2"
+
+[[package]]
+name = "fs2"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "fsio"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c1fd087255f739f4f1aeea69f11b72f8080e9c2e7645cd06955dad4a178a49e3"
+
+[[package]]
+name = "futures-channel"
+version = "0.3.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce79c6a52a299137a6013061e0cf0e688fce5d7f1bc60125f520912fdb29ec25"
+dependencies = [
+ "futures-core",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "098cd1c6dda6ca01650f1a37a794245eb73181d0d4d4e955e2f3c37db7af1815"
+
+[[package]]
+name = "futures-io"
+version = "0.3.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "365a1a1fb30ea1c03a830fdb2158f5236833ac81fa0ad12fe35b29cddc35cb04"
+
+[[package]]
+name = "futures-lite"
+version = "1.11.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4481d0cd0de1d204a4fa55e7d45f07b1d958abcb06714b3446438e2eff695fb"
+dependencies = [
+ "fastrand",
+ "futures-core",
+ "futures-io",
+ "memchr",
+ "parking",
+ "pin-project-lite",
+ "waker-fn",
+]
+
+[[package]]
+name = "futures-micro"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61e9325be55c5581082cd110294fa988c1f920bc573ec370ef201e33c469a95a"
+
+[[package]]
+name = "futures-sink"
+version = "0.3.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c5629433c555de3d82861a7a4e3794a4c40040390907cfbfd7143a92a426c23"
+
+[[package]]
+name = "fxhash"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
+dependencies = [
+ "byteorder",
+]
+
+[[package]]
+name = "gcc"
+version = "0.3.55"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
+
+[[package]]
+name = "generic-array"
+version = "0.12.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd"
+dependencies = [
+ "typenum",
+]
+
+[[package]]
+name = "generic-array"
+version = "0.14.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817"
+dependencies = [
+ "typenum",
+ "version_check",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "unicode-width",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.1.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
+dependencies = [
+ "cfg-if 1.0.0",
+ "libc",
+ "wasi 0.9.0+wasi-snapshot-preview1",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8"
+dependencies = [
+ "cfg-if 1.0.0",
+ "js-sys",
+ "libc",
+ "wasi 0.10.0+wasi-snapshot-preview1",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "glob"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
+
+[[package]]
+name = "gloo-timers"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "47204a46aaff920a1ea58b11d03dec6f704287d27561724a4631e450654a891f"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "js-sys",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "half"
+version = "1.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62aca2aba2d62b4a7f5b33f3712cb1b0692779a56fb510499d5c0aa594daeaf3"
+
+[[package]]
+name = "hashbrown"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "indexmap"
+version = "1.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3"
+dependencies = [
+ "autocfg",
+ "hashbrown",
+]
+
+[[package]]
+name = "instant"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec"
+dependencies = [
+ "cfg-if 1.0.0",
+]
+
+[[package]]
+name = "itertools"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itertools"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itoa"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
+
+[[package]]
+name = "jobserver"
+version = "0.1.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "972f5ae5d1cb9c6ae417789196c803205313edde988685da5e3aae0827b9e7fd"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "js-sys"
+version = "0.3.50"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c"
+dependencies = [
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "json-pest-parser"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3bc5c84a2bceeda1ce3bd58497bde2d8cba61ca0b45873ef502401f0ff2ae8ed"
+dependencies = [
+ "pest",
+ "pest_derive",
+ "thiserror",
+ "unwrap",
+]
+
+[[package]]
+name = "kv-log-macro"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f"
+dependencies = [
+ "log",
+]
+
+[[package]]
+name = "kv_typed"
+version = "0.1.0"
+dependencies = [
+ "async-std",
+ "byteorder",
+ "cfg-if 0.1.10",
+ "criterion",
+ "flume",
+ "leveldb_minimal",
+ "lmdb-zero",
+ "maybe-async",
+ "parking_lot",
+ "paste",
+ "rayon",
+ "regex",
+ "serde_json",
+ "sled",
+ "smallvec",
+ "tempfile",
+ "thiserror",
+ "uninit",
+ "unwrap",
+ "zerocopy",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "leveldb-sys"
+version = "2.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "618aee5ba3d32cb8456420a9a454aa71c1af5b3e9c7a2ec20a0f3cbbe47246cb"
+dependencies = [
+ "cmake",
+ "libc",
+ "num_cpus",
+]
+
+[[package]]
+name = "leveldb_minimal"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "53b4cb22d7d3cce486fc6e2ef7cd25d38e118f525f79c4a946ac48d89c5d16b1"
+dependencies = [
+ "leveldb-sys",
+ "libc",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.93"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9385f66bf6105b241aa65a61cb923ef20efc665cb9f9bb50ac2f0c4b7f378d41"
+
+[[package]]
+name = "liblmdb-sys"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "feed38a3a580f60bf61aaa067b0ff4123395966839adeaf67258a9e50c4d2e49"
+dependencies = [
+ "gcc",
+ "libc",
+]
+
+[[package]]
+name = "lmdb-zero"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13416eee745b087c22934f35f1f24da22da41ba2a5ce197143d168ce055cc58d"
+dependencies = [
+ "bitflags 0.9.1",
+ "libc",
+ "liblmdb-sys",
+ "supercow",
+]
+
+[[package]]
+name = "lock_api"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a3c91c24eae6777794bb1997ad98bbb87daf92890acab859f7eaa4320333176"
+dependencies = [
+ "scopeguard",
+]
+
+[[package]]
+name = "log"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
+dependencies = [
+ "cfg-if 1.0.0",
+ "value-bag",
+]
+
+[[package]]
+name = "maplit"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d"
+
+[[package]]
+name = "maybe-async"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3919783e441e38b0512fae9eebd87c445c42e39508231fba2a8b9fd7ba88e999"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "memchr"
+version = "2.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
+
+[[package]]
+name = "memoffset"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "mockall"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18d614ad23f9bb59119b8b5670a85c7ba92c5e9adf4385c81ea00c51c8be33d5"
+dependencies = [
+ "cfg-if 1.0.0",
+ "downcast",
+ "fragile",
+ "lazy_static",
+ "mockall_derive",
+ "predicates",
+ "predicates-tree",
+]
+
+[[package]]
+name = "mockall_derive"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5dd4234635bca06fc96c7368d038061e0aae1b00a764dc817e900dc974e3deea"
+dependencies = [
+ "cfg-if 1.0.0",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "nanorand"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac1378b66f7c93a1c0f8464a19bf47df8795083842e5090f4b7305973d5a22d0"
+dependencies = [
+ "getrandom 0.2.2",
+]
+
+[[package]]
+name = "nias"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab250442c86f1850815b5d268639dff018c0627022bc1940eb2d642ca1ce12f0"
+
+[[package]]
+name = "normalize-line-endings"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
+
+[[package]]
+name = "num-integer"
+version = "0.1.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
+dependencies = [
+ "autocfg",
+ "num-traits",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
+dependencies = [
+ "hermit-abi",
+ "libc",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3"
+
+[[package]]
+name = "oorandom"
+version = "11.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
+
+[[package]]
+name = "opaque-debug"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c"
+
+[[package]]
+name = "parking"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72"
+
+[[package]]
+name = "parking_lot"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb"
+dependencies = [
+ "instant",
+ "lock_api",
+ "parking_lot_core",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018"
+dependencies = [
+ "cfg-if 1.0.0",
+ "instant",
+ "libc",
+ "redox_syscall",
+ "smallvec",
+ "winapi",
+]
+
+[[package]]
+name = "paste"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58"
+
+[[package]]
+name = "pest"
+version = "2.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53"
+dependencies = [
+ "ucd-trie",
+]
+
+[[package]]
+name = "pest_derive"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0"
+dependencies = [
+ "pest",
+ "pest_generator",
+]
+
+[[package]]
+name = "pest_generator"
+version = "2.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55"
+dependencies = [
+ "pest",
+ "pest_meta",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "pest_meta"
+version = "2.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d"
+dependencies = [
+ "maplit",
+ "pest",
+ "sha-1",
+]
+
+[[package]]
+name = "pin-project"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7509cc106041c40a4518d2af7a61530e1eed0e6285296a3d8c5472806ccc4a4"
+dependencies = [
+ "pin-project-internal",
+]
+
+[[package]]
+name = "pin-project-internal"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "48c950132583b500556b1efd71d45b319029f2b71518d979fcc208e16b42426f"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "pin-project-lite"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905"
+
+[[package]]
+name = "pin-utils"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+
+[[package]]
+name = "plotters"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "45ca0ae5f169d0917a7c7f5a9c1a3d3d9598f18f529dd2b8373ed988efea307a"
+dependencies = [
+ "num-traits",
+ "plotters-backend",
+ "plotters-svg",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "plotters-backend"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b07fffcddc1cb3a1de753caa4e4df03b79922ba43cf882acc1bdd7e8df9f4590"
+
+[[package]]
+name = "plotters-svg"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b38a02e23bd9604b842a812063aec4ef702b57989c37b655254bb61c471ad211"
+dependencies = [
+ "plotters-backend",
+]
+
+[[package]]
+name = "polling"
+version = "2.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fc12d774e799ee9ebae13f4076ca003b40d18a11ac0f3641e6f899618580b7b"
+dependencies = [
+ "cfg-if 1.0.0",
+ "libc",
+ "log",
+ "wepoll-sys",
+ "winapi",
+]
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
+
+[[package]]
+name = "predicates"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eeb433456c1a57cc93554dea3ce40b4c19c4057e41c55d4a0f3d84ea71c325aa"
+dependencies = [
+ "difference",
+ "float-cmp",
+ "normalize-line-endings",
+ "predicates-core",
+ "regex",
+]
+
+[[package]]
+name = "predicates-core"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57e35a3326b75e49aa85f5dc6ec15b41108cf5aee58eabb1f274dd18b73c2451"
+
+[[package]]
+name = "predicates-tree"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15f553275e5721409451eb85e15fd9a860a6e5ab4496eb215987502b5f5391f2"
+dependencies = [
+ "predicates-core",
+ "treeline",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec"
+dependencies = [
+ "unicode-xid",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rand"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
+dependencies = [
+ "getrandom 0.1.16",
+ "libc",
+ "rand_chacha 0.2.2",
+ "rand_core 0.5.1",
+ "rand_hc 0.2.0",
+]
+
+[[package]]
+name = "rand"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
+dependencies = [
+ "libc",
+ "rand_chacha 0.3.0",
+ "rand_core 0.6.2",
+ "rand_hc 0.3.0",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
+dependencies = [
+ "ppv-lite86",
+ "rand_core 0.5.1",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
+dependencies = [
+ "ppv-lite86",
+ "rand_core 0.6.2",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
+dependencies = [
+ "getrandom 0.1.16",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
+dependencies = [
+ "getrandom 0.2.2",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
+dependencies = [
+ "rand_core 0.5.1",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
+dependencies = [
+ "rand_core 0.6.2",
+]
+
+[[package]]
+name = "rayon"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674"
+dependencies = [
+ "autocfg",
+ "crossbeam-deque",
+ "either",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a"
+dependencies = [
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-utils",
+ "lazy_static",
+ "num_cpus",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8270314b5ccceb518e7e578952f0b72b88222d02e8f77f5ecf7abbb673539041"
+dependencies = [
+ "bitflags 1.2.1",
+]
+
+[[package]]
+name = "regex"
+version = "1.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "957056ecddbeba1b26965114e191d2e8589ce74db242b6ea25fc4062427a5c19"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
+dependencies = [
+ "byteorder",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24d5f089152e60f62d28b835fbff2cd2e8dc0baf1ac13343bef92ab7eed84548"
+
+[[package]]
+name = "remove_dir_all"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "resiter"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd69ab1e90258b7769f0b5c46bfd802b8206d0707ced4ca4b9d5681b744de1be"
+
+[[package]]
+name = "ring"
+version = "0.16.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc"
+dependencies = [
+ "cc",
+ "libc",
+ "once_cell",
+ "spin",
+ "untrusted",
+ "web-sys",
+ "winapi",
+]
+
+[[package]]
+name = "rustc_version"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
+dependencies = [
+ "semver",
+]
+
+[[package]]
+name = "rusty-hook"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96cee9be61be7e1cbadd851e58ed7449c29c620f00b23df937cb9cbc04ac21a3"
+dependencies = [
+ "ci_info",
+ "getopts",
+ "nias",
+ "toml",
+]
+
+[[package]]
+name = "ryu"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "scopeguard"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+
+[[package]]
+name = "semver"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
+dependencies = [
+ "semver-parser",
+]
+
+[[package]]
+name = "semver-parser"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
+
+[[package]]
+name = "serde"
+version = "1.0.125"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "558dc50e1a5a5fa7112ca2ce4effcb321b0300c0d4ccf0776a9f60cd89031171"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_cbor"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e18acfa2f90e8b735b2836ab8d538de304cbb6729a7360729ea5a895d15a622"
+dependencies = [
+ "half",
+ "serde",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.125"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b093b7a2bb58203b5da3056c05b4ec1fed827dcfdb37347a8841695263b3d06d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.64"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
+dependencies = [
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "sha-1"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df"
+dependencies = [
+ "block-buffer",
+ "digest 0.8.1",
+ "fake-simd",
+ "opaque-debug",
+]
+
+[[package]]
+name = "slab"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527"
+
+[[package]]
+name = "sled"
+version = "0.34.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d0132f3e393bcb7390c60bb45769498cf4550bcb7a21d7f95c02b69f6362cdc"
+dependencies = [
+ "crc32fast",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+ "fs2",
+ "fxhash",
+ "libc",
+ "log",
+ "parking_lot",
+ "zstd",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "socket2"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "spin"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
+
+[[package]]
+name = "spinning_top"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8bd0ab6b8c375d2d963503b90d3770010d95bc3b5f98036f948dee24bf4e8879"
+dependencies = [
+ "lock_api",
+]
+
+[[package]]
+name = "subtle"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2"
+
+[[package]]
+name = "supercow"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "171758edb47aa306a78dfa4ab9aeb5167405bd4e3dc2b64e88f6a84bbe98bd63"
+
+[[package]]
+name = "syn"
+version = "1.0.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "48fe99c6bd8b1cc636890bcc071842de909d902c81ac7dab53ba33c421ab8ffb"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
+]
+
+[[package]]
+name = "synstructure"
+version = "0.12.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "unicode-xid",
+]
+
+[[package]]
+name = "tempfile"
+version = "3.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
+dependencies = [
+ "cfg-if 1.0.0",
+ "libc",
+ "rand 0.8.3",
+ "redox_syscall",
+ "remove_dir_all",
+ "winapi",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
+dependencies = [
+ "unicode-width",
+]
+
+[[package]]
+name = "thiserror"
+version = "1.0.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "time"
+version = "0.1.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
+dependencies = [
+ "libc",
+ "wasi 0.10.0+wasi-snapshot-preview1",
+ "winapi",
+]
+
+[[package]]
+name = "tinytemplate"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "tokio"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83f0c8e7c0addab50b663055baf787d0af7f413a46e6e7fb9559a4e4db7137a5"
+dependencies = [
+ "autocfg",
+ "bytes",
+ "memchr",
+ "num_cpus",
+ "pin-project-lite",
+ "tokio-macros",
+]
+
+[[package]]
+name = "tokio-macros"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "toml"
+version = "0.5.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "treeline"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41"
+
+[[package]]
+name = "typenum"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06"
+
+[[package]]
+name = "ucd-trie"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c"
+
+[[package]]
+name = "unicode-width"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
+
+[[package]]
+name = "uninit"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ce382f462302087c8effe69a6c9e84ae8ce6a9cc541d921d0bb5d1fd789cdbf"
+
+[[package]]
+name = "untrusted"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
+
+[[package]]
+name = "unwrap"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7e33648dd74328e622c7be51f3b40a303c63f93e6fa5f08778b6203a4c25c20f"
+
+[[package]]
+name = "value-bag"
+version = "1.0.0-alpha.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b676010e055c99033117c2343b33a40a30b91fecd6c49055ac9cd2d6c305ab1"
+dependencies = [
+ "ctor",
+]
+
+[[package]]
+name = "vec-arena"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34b2f665b594b07095e3ac3f718e13c2197143416fae4c5706cffb7b1af8d7f1"
+
+[[package]]
+name = "version_check"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe"
+
+[[package]]
+name = "waker-fn"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca"
+
+[[package]]
+name = "walkdir"
+version = "2.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56"
+dependencies = [
+ "same-file",
+ "winapi",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasi"
+version = "0.9.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
+
+[[package]]
+name = "wasi"
+version = "0.10.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.73"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9"
+dependencies = [
+ "cfg-if 1.0.0",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.73"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae"
+dependencies = [
+ "bumpalo",
+ "lazy_static",
+ "log",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-futures"
+version = "0.4.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81b8b767af23de6ac18bf2168b690bed2902743ddf0fb39252e36f9e2bfc63ea"
+dependencies = [
+ "cfg-if 1.0.0",
+ "js-sys",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.73"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.73"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.73"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489"
+
+[[package]]
+name = "web-sys"
+version = "0.3.50"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a905d57e488fec8861446d3393670fb50d27a262344013181c2cdf9fff5481be"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "wepoll-sys"
+version = "3.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "zerocopy"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6580539ad917b7c026220c4b3f2c08d52ce54d6ce0dc491e66002e35388fab46"
+dependencies = [
+ "byteorder",
+ "zerocopy-derive",
+]
+
+[[package]]
+name = "zerocopy-derive"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc9c39e6d503229ffa00cc2954af4a751e6bbedf2a2c18e856eb3ece93d32495"
+dependencies = [
+ "proc-macro2",
+ "syn",
+ "synstructure",
+]
+
+[[package]]
+name = "zeroize"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd"
+dependencies = [
+ "zeroize_derive",
+]
+
+[[package]]
+name = "zeroize_derive"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "synstructure",
+]
+
+[[package]]
+name = "zstd"
+version = "0.5.4+zstd.1.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910"
+dependencies = [
+ "zstd-safe",
+]
+
+[[package]]
+name = "zstd-safe"
+version = "2.0.6+zstd.1.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e"
+dependencies = [
+ "libc",
+ "zstd-sys",
+]
+
+[[package]]
+name = "zstd-sys"
+version = "1.4.18+zstd.1.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81"
+dependencies = [
+ "cc",
+ "glob",
+ "itertools 0.9.0",
+ "libc",
+]
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..074aad6be6704c9f41da47553fb48fd12f03daaf
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,41 @@
+[package]
+authors = ["elois <elois@duniter.org>"]
+description = "Duniter core."
+edition = "2018"
+keywords = ["duniter"]
+license = "AGPL-3.0"
+name = "duniter-core"
+repository = "https://git.duniter.org/nodes/rust/duniter-core"
+version = "1.8.1"
+
+[dependencies]
+duniter-bc-reader = { path = "bc-reader" }
+duniter-conf = { path = "conf" }
+duniter-dbs = { path = "dbs" }
+duniter-dbs-write-ops = { path = "dbs-write-ops" }
+duniter-mempools = { path = "mempools" }
+duniter-module = { path = "module" }
+duniter-global = { path = "global" }
+
+[dev-dependencies]
+rusty-hook = "0.11.2"
+
+[workspace]
+members = [
+    "dubp-wot",
+    "bc-reader",
+    "conf",
+    "dbs",
+    "dbs-write-ops",
+    "mempools",
+    "module",
+    "global",
+    "tools/kv_typed"
+]
+
+[patch.crates-io]
+#dubp = { git = "https://git.duniter.org/libs/dubp-rs-libs" }
+
+#dubp = { path = "../dubp-rs-libs" }
+
+#leveldb_minimal = { path = "../../../../rust/leveldb_minimal" }
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..a871fcfd0f388fb04465c6c870f0c52967397ed1
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,662 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
+
diff --git a/README.md b/README.md
index 62f13767e1cbca297d2ec3775465fa2d963f31ad..3dfc9da5d3ee74952c89a56fcb1b6a064235a3e5 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
 [![pipeline status](https://git.duniter.org/duniter/duniter-core/badges/master/pipeline.svg)](https://git.duniter.org/libs/dubp-rs-libs/-/commits/master)
 [![dependency status](https://deps.rs/repo/github/duniter/duniter-core/status.svg)](https://deps.rs/repo/github/duniter/duniter-core)
 
-Set of Rust libraries that implement the core Duniter processing and expose everything needed by Duniter modules.
+Set of Rust libraries that implement the core Duniter logic and expose everything needed by Duniter modules.
 
 This repository includes (non-exhaustive list):
 
@@ -16,6 +16,21 @@ This repository includes (non-hexaustive list):
 
 ## Duniter modules
 
-The main Duniter modules are in the `duniter/modules' subgroup of [duniter gitlab](https://git.duniter.org/duniter/modules). Some modules may be located elsewhere, if their creator or maintainer has decided so.
+The main Duniter modules are in the [nodes/rust/modules] subgroup of the [duniter gitlab]. Some modules may be located elsewhere if their creator or maintainer so decides.
 
 To implement your own module, you just need to define a Rust type that implements the `DuniterModule` trait.
+
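+A minimal sketch of the idea (illustrative only: the real `DuniterModule` trait is defined in the `duniter-module` crate and its exact methods are not reproduced here; the `Module` trait and `MyModule` type below are hypothetical stand-ins):
+
+```rust
+// Hypothetical stand-in trait: the real one is `DuniterModule` from the
+// `duniter-module` crate, whose required items are not shown here.
+trait Module {
+    /// Name under which the module is registered.
+    fn name(&self) -> &'static str;
+}
+
+// A module is simply a Rust type...
+struct MyModule;
+
+// ...that implements the module trait.
+impl Module for MyModule {
+    fn name(&self) -> &'static str {
+        "my-module"
+    }
+}
+```
+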
+## Other repositories
+
+Duniter's code is separated into several git repositories:
+
+* **[dubp-rs-libs]** contains the logic common to Duniter and its clients.
+* **[duniter-core]** contains the core code of Duniter.
+* The gitlab subgroup **[nodes/rust/modules]** contains the code of the main Duniter modules (gva, admin, etc.).
+* The **[duniter]** subgroup contains the "official" implementations of the "duniter-cli" and "duniter-desktop" programs with their default modules (it also contains the historical implementation, which is being migrated).
+
+[duniter gitlab]: https://git.duniter.org
+[dubp-rs-libs]: https://git.duniter.org/libs/dubp-rs-libs
+[duniter-core]: https://git.duniter.org/nodes/rust/duniter-core
+[nodes/rust/modules]: https://git.duniter.org/nodes/rust/modules
+[duniter]: https://git.duniter.org/nodes/typescript/duniter
diff --git a/bc-reader/Cargo.toml b/bc-reader/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..62f5c4aae7cd56dbc41524213be6b77472450cc5
--- /dev/null
+++ b/bc-reader/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "duniter-bc-reader"
+version = "0.1.0"
+authors = ["elois <elois@duniter.org>"]
+description = "Duniter DBs read operations"
+repository = "https://git.duniter.org/nodes/typescript/duniter"
+keywords = ["dubp", "duniter", "blockchain", "database"]
+license = "AGPL-3.0"
+edition = "2018"
+
+[lib]
+path = "src/lib.rs"
+
+[dependencies]
+anyhow = "1.0.34"
+duniter-dbs = { path = "../dbs" }
+dubp = { version = "0.51.0", features = ["duniter"] }
+resiter = "0.4.0"
+
+[dev-dependencies]
+smallvec = { version = "1.4.0", features = ["serde", "write"] }
diff --git a/bc-reader/src/lib.rs b/bc-reader/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..222795751ff43113b9d185c767179e020887eac2
--- /dev/null
+++ b/bc-reader/src/lib.rs
@@ -0,0 +1,30 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+#![deny(
+    clippy::unwrap_used,
+    missing_copy_implementations,
+    trivial_casts,
+    trivial_numeric_casts,
+    unstable_features,
+    unused_import_braces
+)]
+
+use dubp::crypto::hashs::Hash;
+use duniter_dbs::{databases::bc_v2::BcV2DbReadable, kv_typed::prelude::*, HashKeyV2};
+
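+/// Check whether a transaction with the given hash has been written to the blockchain database.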
+pub fn tx_exist<BcDb: BcV2DbReadable>(bc_db_ro: &BcDb, hash: Hash) -> KvResult<bool> {
+    bc_db_ro.txs_hashs().contains_key(&HashKeyV2(hash))
+}
diff --git a/conf/Cargo.toml b/conf/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..f2b134f81a891bdcec6eadb601d1295f0cbfa7c8
--- /dev/null
+++ b/conf/Cargo.toml
@@ -0,0 +1,10 @@
+[package]
+name = "duniter-conf"
+version = "0.1.0"
+authors = ["librelois <elois@duniter.org>"]
+license = "AGPL-3.0"
+edition = "2018"
+
+[dependencies]
+dubp = { version = "0.51.0", features = ["duniter"] }
+serde = { version = "1.0.105", features = ["derive"] }
diff --git a/conf/src/gva_conf.rs b/conf/src/gva_conf.rs
new file mode 100644
index 0000000000000000000000000000000000000000..38825c5617e0787d5547ad379a2df6ce6dc64658
--- /dev/null
+++ b/conf/src/gva_conf.rs
@@ -0,0 +1,113 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
+
+use crate::*;
+
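+/// Configuration of the GVA module. Every field is optional: the getters below
+/// fall back to default values when a field is not set.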
+#[derive(Clone, Debug, Default, Deserialize, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GvaConf {
+    ip4: Option<Ipv4Addr>,
+    ip6: Option<Ipv6Addr>,
+    port: Option<u16>,
+    path: Option<String>,
+    subscriptions_path: Option<String>,
+    remote_host: Option<String>,
+    remote_port: Option<u16>,
+    remote_path: Option<String>,
+    remote_subscriptions_path: Option<String>,
+    remote_tls: Option<bool>,
+    whitelist: Option<Vec<IpAddr>>,
+}
+
+impl GvaConf {
+    pub fn get_ip4(&self) -> Ipv4Addr {
+        self.ip4.unwrap_or(Ipv4Addr::LOCALHOST)
+    }
+    pub fn get_ip6(&self) -> Option<Ipv6Addr> {
+        self.ip6
+    }
+    pub fn get_port(&self) -> u16 {
+        self.port.unwrap_or(30901)
+    }
+    pub fn get_path(&self) -> String {
+        if let Some(mut path) = self.path.clone() {
+            if path.starts_with('/') {
+                path.remove(0);
+                path
+            } else {
+                path
+            }
+        } else {
+            "localhost".to_owned()
+        }
+    }
+    pub fn get_subscriptions_path(&self) -> String {
+        if let Some(mut subscriptions_path) = self.subscriptions_path.clone() {
+            if subscriptions_path.starts_with('/') {
+                subscriptions_path.remove(0);
+                subscriptions_path
+            } else {
+                subscriptions_path
+            }
+        } else {
+            "localhost".to_owned()
+        }
+    }
+    pub fn get_remote_host(&self) -> String {
+        if let Some(ref remote_host) = self.remote_host {
+            remote_host.to_owned()
+        } else if let Some(ip6) = self.ip6 {
+            format!("{} [{}]", self.get_ip4(), ip6)
+        } else {
+            self.get_ip4().to_string()
+        }
+    }
+    pub fn get_remote_port(&self) -> u16 {
+        if let Some(remote_port) = self.remote_port {
+            remote_port
+        } else {
+            self.get_port()
+        }
+    }
+    pub fn get_remote_path(&self) -> String {
+        if let Some(ref remote_path) = self.remote_path {
+            remote_path.to_owned()
+        } else {
+            self.get_path()
+        }
+    }
+    pub fn get_remote_subscriptions_path(&self) -> String {
+        if let Some(ref remote_subscriptions_path) = self.remote_subscriptions_path {
+            remote_subscriptions_path.to_owned()
+        } else {
+            self.get_subscriptions_path()
+        }
+    }
+    pub fn get_remote_tls(&self) -> bool {
+        self.remote_tls.unwrap_or(false)
+    }
+    pub fn get_whitelist(&self) -> &[IpAddr] {
+        if let Some(ref whitelist) = self.whitelist {
+            whitelist
+        } else {
+            &[
+                IpAddr::V4(Ipv4Addr::LOCALHOST),
+                IpAddr::V6(Ipv6Addr::LOCALHOST),
+            ]
+        }
+    }
+}
diff --git a/conf/src/lib.rs b/conf/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5038209c6570f174b9464dcba08ef77006fec46e
--- /dev/null
+++ b/conf/src/lib.rs
@@ -0,0 +1,54 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+#![deny(
+    clippy::unwrap_used,
+    missing_copy_implementations,
+    trivial_casts,
+    trivial_numeric_casts,
+    unstable_features,
+    unused_import_braces
+)]
+
+pub mod gva_conf;
+
+use crate::gva_conf::GvaConf;
+use dubp::crypto::keys::ed25519::Ed25519KeyPair;
+use serde::{Deserialize, Serialize};
+
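+/// Duniter node configuration.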
+#[derive(Clone, Debug)]
+pub struct DuniterConf {
+    pub gva: Option<GvaConf>,
+    pub self_key_pair: Ed25519KeyPair,
+    pub txs_mempool_size: usize,
+}
+
+impl Default for DuniterConf {
+    fn default() -> Self {
+        DuniterConf {
+            gva: None,
+            self_key_pair: Ed25519KeyPair::generate_random().expect("fail to gen random keypair"),
+            txs_mempool_size: 0,
+        }
+    }
+}
+
+/// Duniter mode
+#[derive(Clone, Copy, Debug)]
+#[non_exhaustive]
+pub enum DuniterMode {
+    Start,
+    Sync,
+}
diff --git a/dbs-write-ops/Cargo.toml b/dbs-write-ops/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..935dd9f049e4de45f7c856e482808d88868f6efb
--- /dev/null
+++ b/dbs-write-ops/Cargo.toml
@@ -0,0 +1,35 @@
+[package]
+name = "duniter-dbs-write-ops"
+version = "0.1.0"
+authors = ["elois <elois@duniter.org>"]
+description = "Duniter DBs write operations"
+repository = "https://git.duniter.org/nodes/typescript/duniter"
+keywords = ["dubp", "duniter", "blockchain", "database"]
+license = "AGPL-3.0"
+edition = "2018"
+
+[lib]
+path = "src/lib.rs"
+
+[dependencies]
+chrono = "0.4.19"
+dubp = { version = "0.51.0", features = ["duniter"] }
+duniter-dbs = { path = "../dbs" }
+duniter-global = { path = "../global" }
+fast-threadpool = "0.2.3"
+flume = "0.10"
+log = "0.4.11"
+resiter = "0.4.0"
+
+[dev-dependencies]
+anyhow = "1.0.34"
+duniter-dbs = { path = "../dbs", features = ["mem"] }
+maplit = "1.0.2"
+serde_json = "1.0.53"
+
+[features]
+default = ["sled_backend"]
+
+explorer = ["duniter-dbs/explorer"]
+leveldb_backend = ["duniter-dbs/leveldb_backend"]
+sled_backend = ["duniter-dbs/sled_backend"]
diff --git a/dbs-write-ops/src/apply_block.rs b/dbs-write-ops/src/apply_block.rs
new file mode 100644
index 0000000000000000000000000000000000000000..7d7e4511cf72d2e870a13a12c7746ad9a95b151d
--- /dev/null
+++ b/dbs-write-ops/src/apply_block.rs
@@ -0,0 +1,173 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
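+/// Apply a block on the blockchain db and the txs mempool db.
+///
+/// The block must be the direct successor of `current_opt` (or the genesis block if the
+/// blockchain is empty). If it is not, an error is returned when `throw_chainability` is `true`,
+/// otherwise the unchanged current block meta is returned.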
+pub fn apply_block(
+    bc_db: &BcV2Db<FileBackend>,
+    block: Arc<DubpBlockV10>,
+    current_opt: Option<BlockMetaV2>,
+    dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
+    global_sender: &flume::Sender<GlobalBackGroundTaskMsg>,
+    throw_chainability: bool,
+) -> KvResult<BlockMetaV2> {
+    if let Some(current) = current_opt {
+        if block.number().0 == current.number + 1 {
+            apply_block_inner(bc_db, dbs_pool, block, global_sender)
+        } else if throw_chainability {
+            Err(KvError::Custom(
+                format!(
+                    "block #{} not chainable on current #{}",
+                    block.number().0,
+                    current.number
+                )
+                .into(),
+            ))
+        } else {
+            Ok(current)
+        }
+    } else if block.number() == BlockNumber(0) {
+        apply_block_inner(bc_db, dbs_pool, block, global_sender)
+    } else {
+        Err(KvError::Custom(
+            "Try to apply non genesis block on empty blockchain".into(),
+        ))
+    }
+}
+
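+/// Apply a chunk of consecutive blocks and return the meta of the last applied block.
+///
+/// The chunk must be chainable on `current_opt`.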
+#[inline(always)]
+pub fn apply_chunk(
+    bc_db: &BcV2Db<FileBackend>,
+    current_opt: Option<BlockMetaV2>,
+    dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
+    blocks: Arc<[DubpBlockV10]>,
+    global_sender: Option<&flume::Sender<GlobalBackGroundTaskMsg>>,
+) -> KvResult<BlockMetaV2> {
+    verify_chunk_chainability(current_opt, &blocks)?;
+    apply_chunk_inner(bc_db, dbs_pool, blocks, global_sender)
+}
+
+fn verify_chunk_chainability(
+    current_opt: Option<BlockMetaV2>,
+    blocks: &[DubpBlockV10],
+) -> KvResult<()> {
+    if let Some(mut current) = current_opt {
+        for block in blocks {
+            if block.number().0 == current.number + 1 {
+                current.number += 1;
+            } else {
+                return Err(KvError::Custom(
+                    format!(
+                        "block #{} not chainable on current #{}",
+                        block.number().0,
+                        current.number
+                    )
+                    .into(),
+                ));
+            }
+        }
+        Ok(())
+    } else if blocks[0].number() == BlockNumber(0) {
+        let mut current_number = 0;
+        for block in &blocks[1..] {
+            if block.number().0 == current_number + 1 {
+                current_number += 1;
+            } else {
+                return Err(KvError::Custom(
+                    format!(
+                        "block #{} not chainable on current #{}",
+                        block.number().0,
+                        current_number
+                    )
+                    .into(),
+                ));
+            }
+        }
+        Ok(())
+    } else {
+        Err(KvError::Custom(
+            "Try to apply non genesis block on empty blockchain".into(),
+        ))
+    }
+}
+
+fn apply_block_inner(
+    bc_db: &BcV2Db<FileBackend>,
+    dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
+    block: Arc<DubpBlockV10>,
+    global_sender: &flume::Sender<GlobalBackGroundTaskMsg>,
+) -> KvResult<BlockMetaV2> {
+    let block_for_cm = Arc::clone(&block);
+    let block_for_txs_mp = Arc::clone(&block);
+
+    // Cm: notify the global task of the new current block meta
+    crate::cm::update_current_meta(&block_for_cm, global_sender);
+
+    // TxsMp: remove the block's txs from the mempool (on the dbs threadpool)
+    let txs_mp_handle = dbs_pool
+        .launch(move |dbs| {
+            crate::txs_mp::apply_block(block_for_txs_mp.transactions(), &dbs.txs_mp_db)?;
+            Ok::<_, KvError>(())
+        })
+        .expect("dbs pool disconnected");
+
+    // Bc: write the block to the blockchain db
+    let new_current = crate::bc::apply_block(bc_db, &block)?;
+
+    txs_mp_handle.join().expect("dbs pool disconnected")?;
+
+    Ok(new_current)
+}
+
+fn apply_chunk_inner(
+    bc_db: &BcV2Db<FileBackend>,
+    dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
+    blocks: Arc<[DubpBlockV10]>,
+    global_sender: Option<&flume::Sender<GlobalBackGroundTaskMsg>>,
+) -> KvResult<BlockMetaV2> {
+    let blocks_len = blocks.len();
+    let blocks_for_txs_mp = Arc::clone(&blocks);
+
+    // Cm: notify the global task with the last block of the chunk
+    if let Some(global_sender) = global_sender {
+        crate::cm::update_current_meta(&blocks[blocks_len - 1], global_sender);
+    }
+
+    // TxsMp: remove the chunk's txs from the mempool (on the dbs threadpool)
+    let txs_mp_handle = dbs_pool
+        .launch(move |dbs| {
+            for block in blocks_for_txs_mp.deref() {
+                crate::txs_mp::apply_block(block.transactions(), &dbs.txs_mp_db)?;
+            }
+            Ok::<_, KvError>(())
+        })
+        .expect("apply_chunk_inner:txs_mp: dbs pool disconnected");
+
+    // Bc: write all blocks of the chunk to the blockchain db
+    for block in &blocks[..(blocks_len - 1)] {
+        crate::bc::apply_block(bc_db, block)?;
+    }
+    let current_block = crate::bc::apply_block(bc_db, &blocks[blocks_len - 1])?;
+
+    txs_mp_handle
+        .join()
+        .expect("txs_mp_recv: dbs pool disconnected")?;
+    //log::info!("apply_chunk: txs_mp job finish.");
+
+    Ok(current_block)
+}
diff --git a/dbs-write-ops/src/bc.rs b/dbs-write-ops/src/bc.rs
new file mode 100644
index 0000000000000000000000000000000000000000..17b2c1bc26d049bf6103a43b3d08849496a0b1c4
--- /dev/null
+++ b/dbs-write-ops/src/bc.rs
@@ -0,0 +1,325 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+mod identities;
+mod txs;
+mod uds;
+
+use crate::*;
+use duniter_dbs::databases::bc_v2::BcV2DbWritable;
+
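+/// Write a block in the `bc_v2` db (block meta, identities, universal dividends, transactions:
+/// written hashes, consumed and created utxos) in one write batch, then prune the consumed
+/// utxos that are now older than `ROLL_BACK_MAX` blocks.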
+pub fn apply_block<B: Backend>(
+    bc_db: &duniter_dbs::databases::bc_v2::BcV2Db<B>,
+    block: &DubpBlockV10,
+) -> KvResult<BlockMetaV2> {
+    //log::info!("apply_block #{}", block.number().0);
+    let block_meta = BlockMetaV2::from(block);
+
+    (
+        bc_db.blocks_meta_write(),
+        bc_db.identities_write(),
+        bc_db.txs_hashs_write(),
+        bc_db.uds_write(),
+        bc_db.uds_reval_write(),
+        bc_db.uids_index_write(),
+        bc_db.utxos_write(),
+        bc_db.consumed_utxos_write(),
+    )
+        .write(
+            |(
+                mut blocks_meta,
+                mut identities,
+                mut txs_hashs,
+                mut uds,
+                mut uds_reval,
+                mut uids_index,
+                mut utxos,
+                mut consumed_utxos,
+            )| {
+                blocks_meta.upsert(U32BE(block.number().0), block_meta);
+                identities::update_identities::<B>(&block, &mut identities)?;
+                for idty in block.identities() {
+                    let pubkey = idty.issuers()[0];
+                    let username = idty.username().to_owned();
+                    uids_index.upsert(username, PubKeyValV2(pubkey));
+                }
+                if let Some(dividend) = block.dividend() {
+                    uds::create_uds::<B>(
+                        block.number(),
+                        dividend,
+                        &mut identities,
+                        &mut uds,
+                        &mut uds_reval,
+                    )?;
+                }
+                txs::apply_txs::<B>(
+                    block.number(),
+                    block.transactions(),
+                    &mut txs_hashs,
+                    &mut uds,
+                    &mut utxos,
+                    &mut consumed_utxos,
+                )?;
+                Ok(())
+            },
+        )?;
+
+    if block_meta.number > ROLL_BACK_MAX {
+        prune_bc_db(bc_db, BlockNumber(block_meta.number))?;
+    }
+
+    Ok(block_meta)
+}
+
+fn prune_bc_db<B: Backend>(
+    bc_db: &duniter_dbs::databases::bc_v2::BcV2Db<B>,
+    current_block_number: BlockNumber,
+) -> KvResult<()> {
+    bc_db
+        .consumed_utxos_write()
+        .remove(U32BE(current_block_number.0 - ROLL_BACK_MAX))?;
+    Ok(())
+}
+
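+/// Revert a block from the `bc_v2` db and return the meta of the new current block
+/// (`None` if the reverted block was the genesis block).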
+pub fn revert_block<B: Backend>(
+    bc_db: &duniter_dbs::databases::bc_v2::BcV2Db<B>,
+    block: &DubpBlockV10,
+) -> KvResult<Option<BlockMetaV2>> {
+    (
+        bc_db.blocks_meta_write(),
+        bc_db.identities_write(),
+        bc_db.txs_hashs_write(),
+        bc_db.uds_write(),
+        bc_db.uds_reval_write(),
+        bc_db.uids_index_write(),
+        bc_db.utxos_write(),
+        bc_db.consumed_utxos_write(),
+    )
+        .write(
+            |(
+                mut blocks_meta,
+                mut identities,
+                mut txs_hashs,
+                mut uds,
+                mut uds_reval,
+                mut uids_index,
+                mut utxos,
+                mut consumed_utxos,
+            )| {
+                txs::revert_txs::<B>(
+                    block.number(),
+                    block.transactions(),
+                    &mut txs_hashs,
+                    &mut uds,
+                    &mut utxos,
+                    &mut consumed_utxos,
+                )?;
+                if block.dividend().is_some() {
+                    uds::revert_uds::<B>(
+                        block.number(),
+                        &mut identities,
+                        &mut uds,
+                        &mut uds_reval,
+                    )?;
+                }
+                identities::revert_identities::<B>(&block, &mut identities)?;
+                for idty in block.identities() {
+                    let username = idty.username().to_owned();
+                    uids_index.remove(username);
+                }
+                blocks_meta.remove(U32BE(block.number().0));
+                Ok(if block.number() == BlockNumber(0) {
+                    None
+                } else {
+                    blocks_meta.get(&U32BE(block.number().0 - 1))?
+                })
+            },
+        )
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use dubp::{
+        crypto::keys::{ed25519::PublicKey, PublicKey as _},
+        documents::transaction::TransactionDocumentV10Stringified,
+        documents_parser::prelude::FromStringObject,
+    };
+    use duniter_dbs::{
+        databases::bc_v2::*, BlockUtxosV2Db, UtxoIdDbV2, WalletScriptWithSourceAmountV1Db,
+    };
+    use maplit::hashmap;
+
+    #[test]
+    fn test_bc_apply_block() -> anyhow::Result<()> {
+        let bc_db = BcV2Db::<Mem>::open(MemConf::default())?;
+
+        let s1 = WalletScriptV10::single_sig(PublicKey::from_base58(
+            "D9D2zaJoWYWveii1JRYLVK3J4Z7ZH3QczoKrnQeiM6mx",
+        )?);
+        let s2 = WalletScriptV10::single_sig(PublicKey::from_base58(
+            "4fHMTFBMo5sTQEc5p1CNWz28S4mnnqdUBmECq1zt4n2m",
+        )?);
+
+        let b0 = DubpBlockV10::from_string_object(&DubpBlockV10Stringified {
+            version: 10,
+            median_time: 5_243,
+            dividend: Some(1000),
+            joiners: vec!["D9D2zaJoWYWveii1JRYLVK3J4Z7ZH3QczoKrnQeiM6mx:FFeyrvYio9uYwY5aMcDGswZPNjGLrl8THn9l3EPKSNySD3SDSHjCljSfFEwb87sroyzJQoVzPwER0sW/cbZMDg==:0-E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855:0-E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855:elois".to_owned()],
+            inner_hash: Some("0000000A65A12DB95B3153BCD05DB4D5C30CC7F0B1292D9FFBC3DE67F72F6040".to_owned()),
+            signature: "7B0hvcfajE2G8nBLp0vLVaQcQdQIyli21Gu8F2l+nimKHRe+fUNi+MWd1e/u29BYZa+RZ1yxhbHIbFzytg7fAA==".to_owned(),
+            hash: Some("0000000000000000000000000000000000000000000000000000000000000000".to_owned()),
+            ..Default::default()
+        })?;
+
+        apply_block(&bc_db, &b0)?;
+
+        assert_eq!(bc_db.blocks_meta().count()?, 1);
+        assert_eq!(bc_db.uds().count()?, 1);
+        assert_eq!(bc_db.utxos().count()?, 0);
+        assert_eq!(bc_db.consumed_utxos().count()?, 0);
+
+        let b1 = DubpBlockV10::from_string_object(&DubpBlockV10Stringified {
+            number: 1,
+            version: 10,
+            median_time: 5_245,
+            transactions: vec![TransactionDocumentV10Stringified {
+                currency: "test".to_owned(),
+                blockstamp: "0-0000000000000000000000000000000000000000000000000000000000000000".to_owned(),
+                locktime: 0,
+                issuers: vec!["D9D2zaJoWYWveii1JRYLVK3J4Z7ZH3QczoKrnQeiM6mx".to_owned()],
+                inputs: vec!["1000:0:D:D9D2zaJoWYWveii1JRYLVK3J4Z7ZH3QczoKrnQeiM6mx:0".to_owned()],
+                unlocks: vec![],
+                outputs: vec![
+                    "600:0:SIG(4fHMTFBMo5sTQEc5p1CNWz28S4mnnqdUBmECq1zt4n2m)".to_owned(),
+                    "400:0:SIG(D9D2zaJoWYWveii1JRYLVK3J4Z7ZH3QczoKrnQeiM6mx)".to_owned(),
+                ],
+                comment: "".to_owned(),
+                signatures: vec![],
+                hash: Some("0000000000000000000000000000000000000000000000000000000000000000".to_owned()),
+            }],
+            inner_hash: Some("0000000A65A12DB95B3153BCD05DB4D5C30CC7F0B1292D9FFBC3DE67F72F6040".to_owned()),
+            signature: "7B0hvcfajE2G8nBLp0vLVaQcQdQIyli21Gu8F2l+nimKHRe+fUNi+MWd1e/u29BYZa+RZ1yxhbHIbFzytg7fAA==".to_owned(),
+            hash: Some("0000000000000000000000000000000000000000000000000000000000000000".to_owned()),
+            ..Default::default()
+        })?;
+
+        apply_block(&bc_db, &b1)?;
+
+        assert_eq!(bc_db.blocks_meta().count()?, 2);
+        assert_eq!(bc_db.uds().count()?, 0);
+        assert_eq!(bc_db.utxos().count()?, 2);
+        assert_eq!(
+            bc_db
+                .utxos()
+                .iter(.., |it| it.collect::<KvResult<Vec<_>>>())?,
+            vec![
+                (
+                    UtxoIdDbV2(Hash::default(), 0),
+                    WalletScriptWithSourceAmountV1Db {
+                        wallet_script: s2.clone(),
+                        source_amount: SourceAmount::with_base0(600)
+                    }
+                ),
+                (
+                    UtxoIdDbV2(Hash::default(), 1),
+                    WalletScriptWithSourceAmountV1Db {
+                        wallet_script: s1.clone(),
+                        source_amount: SourceAmount::with_base0(400)
+                    }
+                )
+            ]
+        );
+        assert_eq!(bc_db.consumed_utxos().count()?, 0);
+
+        let b2 = DubpBlockV10::from_string_object(&DubpBlockV10Stringified {
+            number: 2,
+            version: 10,
+            median_time: 5_247,
+            transactions: vec![TransactionDocumentV10Stringified {
+                currency: "test".to_owned(),
+                blockstamp: "0-0000000000000000000000000000000000000000000000000000000000000000".to_owned(),
+                locktime: 0,
+                issuers: vec!["D9D2zaJoWYWveii1JRYLVK3J4Z7ZH3QczoKrnQeiM6mx".to_owned()],
+                inputs: vec!["400:0:T:0000000000000000000000000000000000000000000000000000000000000000:1".to_owned()],
+                unlocks: vec![],
+                outputs: vec![
+                    "300:0:SIG(D9D2zaJoWYWveii1JRYLVK3J4Z7ZH3QczoKrnQeiM6mx)".to_owned(),
+                    "100:0:SIG(4fHMTFBMo5sTQEc5p1CNWz28S4mnnqdUBmECq1zt4n2m)".to_owned(),
+                ],
+                comment: "".to_owned(),
+                signatures: vec![],
+                hash: Some("0101010101010101010101010101010101010101010101010101010101010101".to_owned()),
+            }],
+            inner_hash: Some("0000000A65A12DB95B3153BCD05DB4D5C30CC7F0B1292D9FFBC3DE67F72F6040".to_owned()),
+            signature: "7B0hvcfajE2G8nBLp0vLVaQcQdQIyli21Gu8F2l+nimKHRe+fUNi+MWd1e/u29BYZa+RZ1yxhbHIbFzytg7fAA==".to_owned(),
+            hash: Some("0000000000000000000000000000000000000000000000000000000000000000".to_owned()),
+            ..Default::default()
+        })?;
+
+        apply_block(&bc_db, &b2)?;
+
+        assert_eq!(bc_db.blocks_meta().count()?, 3);
+        assert_eq!(bc_db.uds().count()?, 0);
+        assert_eq!(bc_db.utxos().count()?, 3);
+        assert_eq!(bc_db.consumed_utxos().count()?, 1);
+
+        assert_eq!(
+            bc_db
+                .consumed_utxos()
+                .iter(.., |it| it.collect::<KvResult<Vec<_>>>())?,
+            vec![(
+                U32BE(2),
+                BlockUtxosV2Db(
+                    hashmap![UtxoIdV10 { tx_hash: Hash::default(), output_index: 1 } => WalletScriptWithSourceAmountV1Db {
+                        wallet_script: s1.clone(),
+                        source_amount: SourceAmount::with_base0(400)
+                    }]
+                )
+            )]
+        );
+
+        assert_eq!(
+            bc_db
+                .utxos()
+                .iter(.., |it| it.collect::<KvResult<Vec<_>>>())?,
+            vec![
+                (
+                    UtxoIdDbV2(Hash::default(), 0),
+                    WalletScriptWithSourceAmountV1Db {
+                        wallet_script: s2.clone(),
+                        source_amount: SourceAmount::with_base0(600)
+                    }
+                ),
+                (
+                    UtxoIdDbV2(Hash([1; 32]), 0),
+                    WalletScriptWithSourceAmountV1Db {
+                        wallet_script: s1,
+                        source_amount: SourceAmount::with_base0(300)
+                    }
+                ),
+                (
+                    UtxoIdDbV2(Hash([1; 32]), 1),
+                    WalletScriptWithSourceAmountV1Db {
+                        wallet_script: s2,
+                        source_amount: SourceAmount::with_base0(100)
+                    }
+                )
+            ]
+        );
+
+        Ok(())
+    }
+}
diff --git a/dbs-write-ops/src/bc/identities.rs b/dbs-write-ops/src/bc/identities.rs
new file mode 100644
index 0000000000000000000000000000000000000000..0249308747014d1ce3f7ea5ffe6f5631ac97b00b
--- /dev/null
+++ b/dbs-write-ops/src/bc/identities.rs
@@ -0,0 +1,95 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+use duniter_dbs::databases::bc_v2::IdentitiesEvent;
+use duniter_dbs::IdtyDbV2;
+
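+// Apply the identity events of a block: created identities and joiners become members,
+// revoked and excluded identities lose membership.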
+pub(crate) fn update_identities<B: Backend>(
+    block: &DubpBlockV10,
+    identities: &mut TxColRw<B::Col, IdentitiesEvent>,
+) -> KvResult<()> {
+    for idty in block.identities() {
+        let pubkey = idty.issuers()[0];
+        let username = idty.username().to_owned();
+        identities.upsert(
+            PubKeyKeyV2(pubkey),
+            IdtyDbV2 {
+                is_member: true,
+                username,
+            },
+        )
+    }
+    for mb in block.joiners() {
+        let pubkey = mb.issuers()[0];
+        let username = mb.identity_username().to_owned();
+        identities.upsert(
+            PubKeyKeyV2(pubkey),
+            IdtyDbV2 {
+                is_member: true,
+                username,
+            },
+        )
+    }
+    for revo in block.revoked() {
+        let pubkey = revo.issuer;
+        if let Some(mut idty) = identities.get(&PubKeyKeyV2(pubkey))? {
+            idty.is_member = false;
+            identities.upsert(PubKeyKeyV2(pubkey), idty)
+        }
+    }
+    for pubkey in block.excluded().iter().copied() {
+        if let Some(mut idty) = identities.get(&PubKeyKeyV2(pubkey))? {
+            idty.is_member = false;
+            identities.upsert(PubKeyKeyV2(pubkey), idty)
+        }
+    }
+    Ok(())
+}
+
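+// Revert the identity events of a block: joiners lose membership, created identities are
+// removed, revoked and excluded identities get their membership back.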
+pub(crate) fn revert_identities<B: Backend>(
+    block: &DubpBlockV10,
+    identities: &mut TxColRw<B::Col, IdentitiesEvent>,
+) -> KvResult<()> {
+    for mb in block.joiners() {
+        let pubkey = mb.issuers()[0];
+        let username = mb.identity_username().to_owned();
+        identities.upsert(
+            PubKeyKeyV2(pubkey),
+            IdtyDbV2 {
+                is_member: false,
+                username,
+            },
+        )
+    }
+    for idty in block.identities() {
+        let pubkey = idty.issuers()[0];
+        identities.remove(PubKeyKeyV2(pubkey));
+    }
+    for revo in block.revoked() {
+        let pubkey = revo.issuer;
+        if let Some(mut idty) = identities.get(&PubKeyKeyV2(pubkey))? {
+            idty.is_member = true;
+            identities.upsert(PubKeyKeyV2(pubkey), idty)
+        }
+    }
+    for pubkey in block.excluded().iter().copied() {
+        if let Some(mut idty) = identities.get(&PubKeyKeyV2(pubkey))? {
+            idty.is_member = true;
+            identities.upsert(PubKeyKeyV2(pubkey), idty)
+        }
+    }
+    Ok(())
+}
diff --git a/dbs-write-ops/src/bc/txs.rs b/dbs-write-ops/src/bc/txs.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d8154fd0ca6f82f000096daaa0958d01601c4b6e
--- /dev/null
+++ b/dbs-write-ops/src/bc/txs.rs
@@ -0,0 +1,130 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use std::collections::HashMap;
+
+use crate::*;
+use dubp::documents::transaction::TransactionOutputV10;
+use duniter_dbs::{
+    databases::bc_v2::{ConsumedUtxosEvent, TxsHashsEvent, UdsEvent, UtxosEvent},
+    BlockUtxosV2Db, UdIdV2, UtxoIdDbV2, WalletScriptWithSourceAmountV1Db,
+};
+
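+// Apply the block's transactions: record their hashes, consume their inputs (UDs and utxos),
+// keep the utxos consumed by this block for a possible rollback, and create the utxos of
+// their outputs.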
+pub(crate) fn apply_txs<B: Backend>(
+    block_number: BlockNumber,
+    block_txs: &[TransactionDocumentV10],
+    txs_hashs: &mut TxColRw<B::Col, TxsHashsEvent>,
+    uds: &mut TxColRw<B::Col, UdsEvent>,
+    utxos: &mut TxColRw<B::Col, UtxosEvent>,
+    consumed_utxos: &mut TxColRw<B::Col, ConsumedUtxosEvent>,
+) -> KvResult<()> {
+    if !block_txs.is_empty() {
+        let mut block_consumed_utxos = HashMap::with_capacity(block_txs.len() * 3);
+        for tx in block_txs {
+            let tx_hash = tx.get_hash();
+            txs_hashs.upsert(HashKeyV2(tx_hash), ());
+            for input in tx.get_inputs() {
+                match input.id {
+                    SourceIdV10::Ud(UdSourceIdV10 {
+                        issuer,
+                        block_number,
+                    }) => {
+                        uds.remove(UdIdV2(issuer, block_number));
+                    }
+                    SourceIdV10::Utxo(utxo_id) => {
+                        let utxo_id_db = UtxoIdDbV2(utxo_id.tx_hash, utxo_id.output_index as u32);
+                        if let Some(wallet_script_with_sa) = utxos.get(&utxo_id_db)? {
+                            utxos.remove(utxo_id_db);
+                            block_consumed_utxos.insert(utxo_id, wallet_script_with_sa);
+                        } else {
+                            return Err(KvError::Custom(
+                                format!("db corrupted: not found utxo {:?}", utxo_id_db).into(),
+                            ));
+                        }
+                    }
+                }
+            }
+            for (output_index, TransactionOutputV10 { amount, conditions }) in
+                tx.get_outputs().iter().enumerate()
+            {
+                let utxo_id = UtxoIdDbV2(tx_hash, output_index as u32);
+                let wallet_script_with_sa = WalletScriptWithSourceAmountV1Db {
+                    wallet_script: conditions.script.clone(),
+                    source_amount: *amount,
+                };
+                utxos.upsert(utxo_id, wallet_script_with_sa);
+            }
+        }
+        if !block_consumed_utxos.is_empty() {
+            consumed_utxos.upsert(U32BE(block_number.0), BlockUtxosV2Db(block_consumed_utxos));
+        }
+    }
+    Ok(())
+}
+
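+// Revert the block's transactions: forget their hashes, restore their consumed inputs (UDs
+// and utxos), remove the utxos of their outputs and drop the block's consumed utxos record.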
+pub(crate) fn revert_txs<B: Backend>(
+    block_number: BlockNumber,
+    block_txs: &[TransactionDocumentV10],
+    txs_hashs: &mut TxColRw<B::Col, TxsHashsEvent>,
+    uds: &mut TxColRw<B::Col, UdsEvent>,
+    utxos: &mut TxColRw<B::Col, UtxosEvent>,
+    consumed_utxos: &mut TxColRw<B::Col, ConsumedUtxosEvent>,
+) -> KvResult<()> {
+    for tx in block_txs {
+        let tx_hash = tx.get_hash();
+        txs_hashs.remove(HashKeyV2(tx_hash));
+        for input in tx.get_inputs() {
+            match input.id {
+                SourceIdV10::Ud(UdSourceIdV10 {
+                    issuer,
+                    block_number,
+                }) => {
+                    uds.upsert(UdIdV2(issuer, block_number), ());
+                }
+                SourceIdV10::Utxo(utxo_id) => {
+                    let utxo_id_db = UtxoIdDbV2(utxo_id.tx_hash, utxo_id.output_index as u32);
+                    if let Some(block_utxos) = consumed_utxos.get(&U32BE(block_number.0))? {
+                        if let Some(wallet_script_with_sa) = block_utxos.0.get(&utxo_id) {
+                            utxos.upsert(utxo_id_db, wallet_script_with_sa.clone());
+                        } else {
+                            return Err(KvError::Custom(
+                                format!("db corrupted: not found consumed utxos {}", utxo_id)
+                                    .into(),
+                            ));
+                        }
+                    } else {
+                        return Err(KvError::Custom(
+                            format!("db corrupted: not found consumed utxos {:?}", utxo_id_db)
+                                .into(),
+                        ));
+                    }
+                }
+            }
+        }
+        for output_index in 0..tx.get_outputs().len() {
+            let utxo_id = UtxoIdDbV2(tx_hash, output_index as u32);
+            utxos.remove(utxo_id);
+        }
+    }
+    consumed_utxos.remove(U32BE(block_number.0));
+    Ok(())
+}
diff --git a/dbs-write-ops/src/bc/uds.rs b/dbs-write-ops/src/bc/uds.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e8323f9bfc66a80d2593efcd40554106123f384a
--- /dev/null
+++ b/dbs-write-ops/src/bc/uds.rs
@@ -0,0 +1,69 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+use duniter_dbs::{
+    databases::bc_v2::{IdentitiesEvent, UdsEvent, UdsRevalEvent},
+    UdIdV2,
+};
+
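+// Create the universal dividends of a block: record a revaluation if the dividend amount
+// increased, then credit one UD to each member.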
+pub(crate) fn create_uds<B: Backend>(
+    block_number: BlockNumber,
+    dividend: SourceAmount,
+    identities: &mut TxColRw<B::Col, IdentitiesEvent>,
+    uds: &mut TxColRw<B::Col, UdsEvent>,
+    uds_reval: &mut TxColRw<B::Col, UdsRevalEvent>,
+) -> KvResult<()> {
+    let previous_ud_amount = uds_reval
+        .iter_rev(.., |it| it.values().next_res())?
+        .unwrap_or(SourceAmountValV2(SourceAmount::ZERO));
+    if dividend > previous_ud_amount.0 {
+        uds_reval.upsert(U32BE(block_number.0), SourceAmountValV2(dividend));
+    }
+
+    let members = identities.iter(.., |it| {
+        it.filter_map_ok(|(pk, idty)| if idty.is_member { Some(pk.0) } else { None })
+            .collect::<KvResult<Vec<_>>>()
+    })?;
+    for member in members {
+        uds.upsert(UdIdV2(member, block_number), ());
+    }
+    Ok(())
+}
+
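+// Revert the universal dividends of a block: remove the revaluation written at this block
+// (if any) and remove the UD of each member.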
+pub(crate) fn revert_uds<B: Backend>(
+    block_number: BlockNumber,
+    identities: &mut TxColRw<B::Col, IdentitiesEvent>,
+    uds: &mut TxColRw<B::Col, UdsEvent>,
+    uds_reval: &mut TxColRw<B::Col, UdsRevalEvent>,
+) -> KvResult<()> {
+    let previous_reval_block_number = uds_reval
+        .iter_rev(.., |it| it.keys().next_res())?
+        .expect("corrupted db")
+        .0;
+    if block_number.0 == previous_reval_block_number {
+        uds_reval.remove(U32BE(block_number.0));
+    }
+
+    let members = identities.iter(.., |it| {
+        it.filter_map_ok(|(pk, idty)| if idty.is_member { Some(pk.0) } else { None })
+            .collect::<KvResult<Vec<_>>>()
+    })?;
+    for member in members {
+        uds.remove(UdIdV2(member, block_number));
+    }
+
+    Ok(())
+}
diff --git a/dbs-write-ops/src/cm.rs b/dbs-write-ops/src/cm.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5d9a61a27fcbf5ab0ef671ca5821904807352b15
--- /dev/null
+++ b/dbs-write-ops/src/cm.rs
@@ -0,0 +1,51 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
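+// Notify the global background task of the new current block meta.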
+#[inline(always)]
+pub(crate) fn update_current_meta(
+    block: &DubpBlockV10,
+    global_sender: &flume::Sender<GlobalBackGroundTaskMsg>,
+) {
+    let current_block_meta = block_to_block_meta(block);
+    global_sender
+        .send(GlobalBackGroundTaskMsg::NewCurrentBlock(current_block_meta))
+        .expect("global task disconnected");
+}
+
+fn block_to_block_meta(block: &DubpBlockV10) -> BlockMetaV2 {
+    BlockMetaV2 {
+        version: 10,
+        number: block.number().0,
+        hash: block.hash().0,
+        signature: block.signature(),
+        inner_hash: block.inner_hash(),
+        previous_hash: block.previous_hash(),
+        issuer: block.issuer(),
+        previous_issuer: dubp::crypto::keys::ed25519::PublicKey::default(),
+        time: block.local_time(),
+        pow_min: block.pow_min() as u32,
+        members_count: block.members_count() as u64,
+        issuers_count: block.issuers_count() as u32,
+        issuers_frame: block.issuers_frame() as u64,
+        issuers_frame_var: 0,
+        median_time: block.common_time(),
+        nonce: block.nonce(),
+        monetary_mass: block.monetary_mass(),
+        unit_base: block.unit_base() as u32,
+        dividend: block.dividend(),
+    }
+}
diff --git a/dbs-write-ops/src/lib.rs b/dbs-write-ops/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b112bce01086546f87198112b8f03dfc7a6abc33
--- /dev/null
+++ b/dbs-write-ops/src/lib.rs
@@ -0,0 +1,55 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+#![deny(
+    clippy::unwrap_used,
+    missing_copy_implementations,
+    trivial_casts,
+    trivial_numeric_casts,
+    unstable_features,
+    unused_import_braces
+)]
+
+pub mod apply_block;
+pub mod bc;
+pub mod cm;
+pub mod txs_mp;
+
+use std::borrow::Cow;
+
+use dubp::block::prelude::*;
+use dubp::common::crypto::hashs::Hash;
+use dubp::common::prelude::*;
+use dubp::documents::{
+    prelude::*, smallvec::SmallVec, transaction::TransactionDocumentTrait,
+    transaction::TransactionDocumentV10,
+};
+use dubp::wallet::prelude::*;
+use duniter_dbs::{
+    databases::{
+        bc_v2::BcV2Db,
+        txs_mp_v2::{TxsMpV2Db, TxsMpV2DbReadable, TxsMpV2DbWritable},
+    },
+    kv_typed::prelude::*,
+    BlockMetaV2, FileBackend, HashKeyV2, PendingTxDbV2, PubKeyKeyV2, PubKeyValV2, SharedDbs,
+    SourceAmountValV2, UtxoValV2, WalletConditionsV2,
+};
+use duniter_global::GlobalBackGroundTaskMsg;
+use resiter::filter_map::FilterMap;
+use resiter::flatten::Flatten;
+use resiter::map::Map;
+use std::ops::Deref;
+
+const ROLL_BACK_MAX: u32 = 100;
diff --git a/dbs-write-ops/src/txs_mp.rs b/dbs-write-ops/src/txs_mp.rs
new file mode 100644
index 0000000000000000000000000000000000000000..7f9e5139532553954fa592d61079721972a2bdd2
--- /dev/null
+++ b/dbs-write-ops/src/txs_mp.rs
@@ -0,0 +1,225 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
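+/// Remove the transactions written in a block from the pending txs mempool.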
+pub fn apply_block<B: Backend>(
+    block_txs: &[TransactionDocumentV10],
+    txs_mp_db: &TxsMpV2Db<B>,
+) -> KvResult<()> {
+    for tx in block_txs {
+        // Remove tx from mempool
+        remove_one_pending_tx(&txs_mp_db, tx.get_hash())?;
+    }
+    Ok(())
+}
+
+pub fn revert_block<B: Backend>(
+    block_txs: &[TransactionDocumentV10],
+    txs_mp_db: &TxsMpV2Db<B>,
+) -> KvResult<()> {
+    for tx in block_txs {
+        // Rewrite tx on mempool
+        add_pending_tx(|_, _| Ok(()), txs_mp_db, Cow::Borrowed(tx))?;
+    }
+    Ok(())
+}
+
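+/// Add a pending transaction to the mempool, indexed by receive time, issuer, recipient,
+/// inputs (UDs/utxos ids) and outputs (by script).
+///
+/// The `control` closure can reject the transaction before anything is written.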
+pub fn add_pending_tx<
+    B: Backend,
+    F: FnOnce(
+        &TransactionDocumentV10,
+        &TxColRw<B::Col, duniter_dbs::databases::txs_mp_v2::TxsEvent>,
+    ) -> KvResult<()>,
+>(
+    control: F,
+    txs_mp_db: &TxsMpV2Db<B>,
+    tx: Cow<TransactionDocumentV10>,
+) -> KvResult<()> {
+    let tx_hash = tx.get_hash();
+    let received_time = chrono::offset::Utc::now().timestamp();
+    (
+        txs_mp_db.txs_by_recv_time_write(),
+        txs_mp_db.txs_by_issuer_write(),
+        txs_mp_db.txs_by_recipient_write(),
+        txs_mp_db.txs_write(),
+        txs_mp_db.uds_ids_write(),
+        txs_mp_db.utxos_ids_write(),
+        txs_mp_db.outputs_by_script_write(),
+    )
+        .write(
+            |(
+                mut txs_by_recv_time,
+                mut txs_by_issuer,
+                mut txs_by_recipient,
+                mut txs,
+                mut uds_ids,
+                mut utxos_ids,
+                mut outputs_by_script,
+            )| {
+                control(&tx, &txs)?;
+                // Insert on col `txs_by_recv_time`
+                let mut hashs = txs_by_recv_time.get(&received_time)?.unwrap_or_default();
+                hashs.insert(tx_hash);
+                txs_by_recv_time.upsert(received_time, hashs);
+                // Insert on col `txs_by_issuer`
+                for pubkey in tx.issuers() {
+                    let mut hashs = txs_by_issuer.get(&PubKeyKeyV2(pubkey))?.unwrap_or_default();
+                    hashs.insert(tx.get_hash());
+                    txs_by_issuer.upsert(PubKeyKeyV2(pubkey), hashs);
+                }
+                // Insert on col `txs_by_recipient`
+                for pubkey in tx.recipients_keys() {
+                    let mut hashs = txs_by_recipient
+                        .get(&PubKeyKeyV2(pubkey))?
+                        .unwrap_or_default();
+                    hashs.insert(tx.get_hash());
+                    txs_by_recipient.upsert(PubKeyKeyV2(pubkey), hashs);
+                }
+                // Insert tx inputs in cols `uds_ids` and `utxos_ids`
+                for input in tx.get_inputs() {
+                    match input.id {
+                        SourceIdV10::Ud(UdSourceIdV10 {
+                            issuer,
+                            block_number,
+                        }) => uds_ids.upsert(duniter_dbs::UdIdV2(issuer, block_number), ()),
+                        SourceIdV10::Utxo(UtxoIdV10 {
+                            tx_hash,
+                            output_index,
+                        }) => utxos_ids
+                            .upsert(duniter_dbs::UtxoIdDbV2(tx_hash, output_index as u32), ()),
+                    }
+                }
+                // Insert tx outputs in col `outputs`
+                for (output_index, output) in tx.get_outputs().iter().enumerate() {
+                    let script = WalletConditionsV2(output.conditions.script.to_owned());
+                    let utxo = UtxoValV2::new(output.amount, tx_hash, output_index as u32);
+                    let mut script_outputs = outputs_by_script.get(&script)?.unwrap_or_default();
+                    script_outputs.insert(utxo);
+                    outputs_by_script.upsert(script, script_outputs);
+                }
+                // Insert tx itself
+                txs.upsert(HashKeyV2(tx_hash), PendingTxDbV2(tx.into_owned()));
+                Ok(())
+            },
+        )
+}
+
+pub fn remove_all_pending_txs<B: Backend>(txs_mp_db: &TxsMpV2Db<B>) -> KvResult<()> {
+    txs_mp_db.txs_by_recv_time_write().clear()?;
+    txs_mp_db.txs_by_issuer_write().clear()?;
+    txs_mp_db.txs_by_recipient_write().clear()?;
+    txs_mp_db.txs_write().clear()?;
+    txs_mp_db.uds_ids_write().clear()?;
+    txs_mp_db.utxos_ids_write().clear()?;
+    txs_mp_db.outputs_by_script_write().clear()?;
+
+    Ok(())
+}
+
+pub fn remove_pending_tx_by_hash<B: Backend>(txs_mp_db: &TxsMpV2Db<B>, hash: Hash) -> KvResult<()> {
+    remove_one_pending_tx(&txs_mp_db, hash)?;
+    Ok(())
+}
+
+pub fn trim_expired_non_written_txs<B: Backend>(
+    txs_mp_db: &TxsMpV2Db<B>,
+    limit_time: i64,
+) -> KvResult<()> {
+    // Get hashs of txs to remove and expired "times" to remove
+    let mut times = Vec::new();
+    let hashs = txs_mp_db.txs_by_recv_time().iter(..limit_time, |it| {
+        it.map_ok(|(k, v)| {
+            times.push(k);
+            v
+        })
+        .flatten_ok()
+        .collect::<KvResult<SmallVec<[Hash; 4]>>>()
+    })?;
+    // Remove each expired tx (one "time" entry may hold several txs hashs)
+    for hash in hashs {
+        remove_one_pending_tx(txs_mp_db, hash)?;
+    }
+    // Remove expired entries in col `txs_by_recv_time`
+    for time in times {
+        txs_mp_db.txs_by_recv_time_write().remove(time)?;
+    }
+
+    Ok(())
+}
+
+fn remove_one_pending_tx<B: Backend>(txs_mp_db: &TxsMpV2Db<B>, tx_hash: Hash) -> KvResult<bool> {
+    if let Some(tx) = txs_mp_db.txs().get(&HashKeyV2(tx_hash))? {
+        (
+            txs_mp_db.txs_by_issuer_write(),
+            txs_mp_db.txs_by_recipient_write(),
+            txs_mp_db.txs_write(),
+            txs_mp_db.uds_ids_write(),
+            txs_mp_db.utxos_ids_write(),
+            txs_mp_db.outputs_by_script_write(),
+        )
+            .write(
+                |(
+                    mut txs_by_issuer,
+                    mut txs_by_recipient,
+                    mut txs,
+                    mut uds_ids,
+                    mut utxos_ids,
+                    mut outputs_by_script,
+                )| {
+                    // Remove tx inputs in cols `uds_ids` and `utxos_ids`
+                    for input in tx.0.get_inputs() {
+                        match input.id {
+                            SourceIdV10::Ud(UdSourceIdV10 {
+                                issuer,
+                                block_number,
+                            }) => uds_ids.remove(duniter_dbs::UdIdV2(issuer, block_number)),
+                            SourceIdV10::Utxo(UtxoIdV10 {
+                                tx_hash,
+                                output_index,
+                            }) => utxos_ids
+                                .remove(duniter_dbs::UtxoIdDbV2(tx_hash, output_index as u32)),
+                        }
+                    }
+                    // Remove tx hash in col `txs_by_issuer`
+                    for pubkey in tx.0.issuers() {
+                        let mut hashs_ =
+                            txs_by_issuer.get(&PubKeyKeyV2(pubkey))?.unwrap_or_default();
+                        hashs_.remove(&tx_hash);
+                        txs_by_issuer.upsert(PubKeyKeyV2(pubkey), hashs_)
+                    }
+                    // Remove tx hash in col `txs_by_recipient`
+                    for pubkey in tx.0.recipients_keys() {
+                        let mut hashs_ = txs_by_recipient
+                            .get(&PubKeyKeyV2(pubkey))?
+                            .unwrap_or_default();
+                        hashs_.remove(&tx_hash);
+                        txs_by_recipient.upsert(PubKeyKeyV2(pubkey), hashs_)
+                    }
+                    // Remove tx outputs in col `outputs`
+                    for (output_index, output) in tx.0.get_outputs().iter().enumerate() {
+                        let script = WalletConditionsV2(output.conditions.script.to_owned());
+                        let utxo = UtxoValV2::new(output.amount, tx_hash, output_index as u32);
+                        let mut script_outputs =
+                            outputs_by_script.get(&script)?.unwrap_or_default();
+                        script_outputs.remove(&utxo);
+                        outputs_by_script.upsert(script, script_outputs);
+                    }
+                    // Remove tx itself
+                    txs.remove(HashKeyV2(tx_hash));
+                    Ok(true)
+                },
+            )
+    } else {
+        Ok(false)
+    }
+}
diff --git a/dbs/Cargo.toml b/dbs/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..85437e4e2959a8de8b9c04e913520bd62184532b
--- /dev/null
+++ b/dbs/Cargo.toml
@@ -0,0 +1,48 @@
+[package]
+name = "duniter-dbs"
+version = "0.1.0"
+authors = ["elois <elois@duniter.org>"]
+description = "Duniter blockchain DB"
+repository = "https://git.duniter.org/nodes/typescript/duniter"
+keywords = ["dubp", "duniter", "blockchain", "database"]
+license = "AGPL-3.0"
+edition = "2018"
+
+[lib]
+path = "src/lib.rs"
+
+[dependencies]
+arrayvec = "0.5.1"
+bincode = "1.2.1"
+byteorder = "1.3.4"
+chrono = { version = "0.4.15", optional = true }
+dubp = { version = "0.51.0", features = ["duniter"] }
+kv_typed = { path = "../tools/kv_typed", default-features = false }
+log = "0.4.8"
+mockall = { version = "0.9.1", optional = true }
+parking_lot = "0.11.0"
+paste = "1.0.2"
+rand = "0.7.3"
+serde = { version = "1.0.105", features = ["derive"] }
+serde_json = "1.0.53"
+smallvec = { version = "1.4.0", features = ["serde", "write"] }
+thiserror = "1.0.20"
+uninit = "0.4.0"
+zerocopy = "0.3.0"
+
+[dev-dependencies]
+anyhow = "1.0.34"
+tempfile = "3.2.0"
+unwrap = "1.2.1"
+
+[features]
+default = ["sled_backend"]
+
+# CAUTION: feature "leveldb_backend" MUST BE DISABLED by default. Uncomment this line for dev/test only ! 
+#default = ["sled_backend", "explorer", "leveldb_backend"]
+
+explorer = ["chrono", "kv_typed/explorer"]
+leveldb_backend = ["kv_typed/leveldb_backend"]
+mem = []
+#mock = ["kv_typed/mock", "mockall"]
+sled_backend = ["kv_typed/sled_backend"]
diff --git a/dbs/src/databases.rs b/dbs/src/databases.rs
new file mode 100644
index 0000000000000000000000000000000000000000..19792fc7c2e0237b69f1e9a938657522105bca11
--- /dev/null
+++ b/dbs/src/databases.rs
@@ -0,0 +1,20 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+pub mod bc_v1;
+pub mod bc_v2;
+pub mod cm_v1;
+pub mod network_v1;
+pub mod txs_mp_v2;
diff --git a/dbs/src/databases/bc_v1.rs b/dbs/src/databases/bc_v1.rs
new file mode 100644
index 0000000000000000000000000000000000000000..bf01ede5e0c999ee807d72737654a11c8d110995
--- /dev/null
+++ b/dbs/src/databases/bc_v1.rs
@@ -0,0 +1,158 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
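+// Legacy blockchain db (V1): mapping of the historical `level_*` LevelDB columns.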
+db_schema!(
+    BcV1,
+    [
+        ["level_blockchain", MainBlocks, BlockNumberKeyV1, BlockDbV1],
+        [
+            "level_blockchain/idty",
+            MbIdty,
+            PubKeyKeyV1,
+            BlockNumberArrayV1
+        ],
+        [
+            "level_blockchain/certs",
+            MbCerts,
+            PubKeyKeyV1,
+            BlockNumberArrayV1
+        ],
+        [
+            "level_blockchain/joiners",
+            MbJoiners,
+            PubKeyKeyV1,
+            BlockNumberArrayV1
+        ],
+        [
+            "level_blockchain/actives",
+            MbActives,
+            PubKeyKeyV1,
+            BlockNumberArrayV1
+        ],
+        [
+            "level_blockchain/leavers",
+            MbLeavers,
+            PubKeyKeyV1,
+            BlockNumberArrayV1
+        ],
+        [
+            "level_blockchain/excluded",
+            MbExcluded,
+            PubKeyKeyV1,
+            BlockNumberArrayV1
+        ],
+        [
+            "level_blockchain/revoked",
+            MbRevoked,
+            PubKeyAndSigV1,
+            BlockNumberArrayV1
+        ],
+        [
+            "level_blockchain/dividends",
+            MbDividends,
+            AllKeyV1,
+            BlockNumberArrayV1
+        ],
+        [
+            "level_blockchain/transactions",
+            MbTransactions,
+            AllKeyV1,
+            BlockNumberArrayV1
+        ],
+        [
+            "level_blockchain/forks",
+            ForkBlocks,
+            BlockstampKeyV1,
+            BlockDbV1
+        ],
+        ["level_bindex", Bindex, BlockNumberKeyV1, BlockHeadDbV1],
+        ["level_iindex", Iindex, PubKeyKeyV1, IIndexDbV1],
+        [
+            "level_iindex/hash",
+            IindexHash,
+            HashKeyV1,
+            PublicKeySingletonDbV1
+        ],
+        ["level_iindex/kick", IindexKick, PubKeyKeyV1, KickDbV1],
+        [
+            "level_iindex/writtenOn",
+            IindexWrittenOn,
+            BlockNumberKeyV1,
+            PublicKeyArrayDbV1
+        ],
+        ["level_iindex/uid", Uids, UidKeyV1, PublicKeySingletonDbV1],
+        ["level_mindex", Mindex, PubKeyKeyV1, MIndexDbV1],
+        [
+            "level_mindex/expiresOn",
+            MindexExpiresOn,
+            TimestampKeyV1,
+            PublicKeyArrayDbV1
+        ],
+        [
+            "level_mindex/revokesOn",
+            MindexRevokesOn,
+            TimestampKeyV1,
+            PublicKeyArrayDbV1
+        ],
+        [
+            "level_mindex/writtenOn",
+            MindexWrittenOn,
+            BlockNumberKeyV1,
+            PublicKeyArrayDbV1
+        ],
+        ["level_cindex", Cindex, PubKeyKeyV1, CIndexDbV1],
+        [
+            "level_cindex/expiresOn",
+            CindexExpiresOn,
+            BlockNumberKeyV1,
+            PublicKeyArrayDbV1
+        ],
+        [
+            "level_cindex/writtenOn",
+            CindexWrittenOn,
+            BlockNumberKeyV1,
+            PublicKeyArrayDbV1
+        ],
+        ["level_wallet", Wallet, WalletConditionsV1, WalletDbV1],
+        ["level_dividend", Uds, PubKeyKeyV1, UdEntryDbV1],
+        [
+            "level_dividend/level_dividend_trim_index",
+            UdsTrim,
+            BlockNumberKeyV1,
+            PublicKeyArrayDbV1
+        ],
+        ["level_sindex", Sindex, SourceKeyV1, SIndexDBV1],
+        [
+            "level_sindex/written_on",
+            SindexWrittenOn,
+            BlockNumberKeyV1,
+            SourceKeyArrayDbV1
+        ],
+        [
+            "level_sindex/consumed_on",
+            SindexConsumedOn,
+            BlockNumberKeyV1,
+            SourceKeyArrayDbV1
+        ],
+        [
+            "level_sindex/conditions",
+            SindexConditions,
+            WalletConditionsV1,
+            SourceKeyArrayDbV1
+        ],
+    ]
+);
diff --git a/dbs/src/databases/bc_v2.rs b/dbs/src/databases/bc_v2.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c5256029671a892a8e1a676e32ccb85d7f44556a
--- /dev/null
+++ b/dbs/src/databases/bc_v2.rs
@@ -0,0 +1,30 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
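+// Blockchain db (V2): block metas, identities, written txs hashes, universal dividends and
+// their revaluations, usernames index, utxos and consumed utxos (kept for rollback).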
+db_schema!(
+    BcV2,
+    [
+        ["blocks_meta", BlocksMeta, U32BE, BlockMetaV2],
+        ["identities", Identities, PubKeyKeyV2, IdtyDbV2],
+        ["txs_hashs", TxsHashs, HashKeyV2, ()],
+        ["uds", Uds, UdIdV2, ()],
+        ["uds_reval", UdsReval, U32BE, SourceAmountValV2],
+        ["uids_index", UidsIndex, String, PubKeyValV2],
+        ["utxos", Utxos, UtxoIdDbV2, WalletScriptWithSourceAmountV1Db],
+        ["consumed_utxos", ConsumedUtxos, U32BE, BlockUtxosV2Db],
+    ]
+);
diff --git a/dbs/src/databases/cm_v1.rs b/dbs/src/databases/cm_v1.rs
new file mode 100644
index 0000000000000000000000000000000000000000..4cea86827c384cc0ddaacd8fa7c0ffdaf1991051
--- /dev/null
+++ b/dbs/src/databases/cm_v1.rs
@@ -0,0 +1,18 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
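+// Current-meta db: a single entry holding the current block.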
+db_schema!(CmV1, [["current_block", CurrentBlock, (), BlockDbV2],]);
diff --git a/dbs/src/databases/network_v1.rs b/dbs/src/databases/network_v1.rs
new file mode 100644
index 0000000000000000000000000000000000000000..db55149ae3d05b496dab1954b0e5600920ba2454
--- /dev/null
+++ b/dbs/src/databases/network_v1.rs
@@ -0,0 +1,24 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
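+// Network db (V1): known node heads and peer cards.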
+db_schema!(
+    NetworkV1,
+    [
+        ["heads_old", HeadsOld, DunpNodeIdV1Db, DunpHeadDbV1],
+        ["peers_old", PeersOld, PubKeyKeyV2, PeerCardDbV1],
+    ]
+);
diff --git a/dbs/src/databases/txs_mp_v2.rs b/dbs/src/databases/txs_mp_v2.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e055328076de489711d120030a1f328f4e12d36e
--- /dev/null
+++ b/dbs/src/databases/txs_mp_v2.rs
@@ -0,0 +1,29 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
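+// Txs mempool db: pending txs indexed by hash, issuer, recipient and receive time, plus their
+// pending inputs (UDs/utxos ids) and their outputs grouped by script.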
+db_schema!(
+    TxsMpV2,
+    [
+        ["txs", Txs, HashKeyV2, PendingTxDbV2],
+        ["txs_by_issuer", TxsByIssuer, PubKeyKeyV2, BTreeSet<Hash>],
+        ["txs_by_recipient", TxsByRecipient, PubKeyKeyV2, BTreeSet<Hash>],
+        ["txs_by_received_time", TxsByRecvTime, i64, BTreeSet<Hash>],
+        ["uds_ids", UdsIds, UdIdV2, ()],
+        ["utxos_ids", UtxosIds, UtxoIdDbV2, ()],
+        ["outputs_by_script", OutputsByScript, WalletConditionsV2, BTreeSet<UtxoValV2>],
+    ]
+);
diff --git a/dbs/src/keys.rs b/dbs/src/keys.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b5d2db16bd1451130a4bca39686bba24cfd933a1
--- /dev/null
+++ b/dbs/src/keys.rs
@@ -0,0 +1,28 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+pub mod all;
+pub mod block_number;
+pub mod blockstamp;
+pub mod dunp_node_id;
+pub mod hash;
+pub mod pubkey;
+pub mod pubkey_and_sig;
+pub mod source_key;
+pub mod timestamp;
+pub mod ud_id;
+pub mod uid;
+pub mod utxo_id;
+pub mod wallet_conditions;
diff --git a/dbs/src/keys/all.rs b/dbs/src/keys/all.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3515a52f5664dd6cfbf08d8ad4f86105615553ab
--- /dev/null
+++ b/dbs/src/keys/all.rs
@@ -0,0 +1,57 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)]
+pub struct AllKeyV1;
+
+impl AsBytes for AllKeyV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(b"ALL")
+    }
+}
+
+impl kv_typed::prelude::FromBytes for AllKeyV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        if bytes == b"ALL" {
+            Ok(Self)
+        } else {
+            Err(CorruptedBytes(format!(
+                "Invalid key: expected '{:?}', found '{:?}'",
+                b"ALL", bytes
+            )))
+        }
+    }
+}
+
+impl ToDumpString for AllKeyV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for AllKeyV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        <Self as kv_typed::prelude::FromBytes>::from_bytes(source.as_bytes())
+            .map_err(|e| FromExplorerKeyErr(e.0.into()))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        self.as_bytes(|bytes| Ok(unsafe { std::str::from_utf8_unchecked(bytes) }.to_owned()))
+    }
+}
diff --git a/dbs/src/keys/block_number.rs b/dbs/src/keys/block_number.rs
new file mode 100644
index 0000000000000000000000000000000000000000..afd226849ebf6a915d4ea2253c6d1af48fa7cac0
--- /dev/null
+++ b/dbs/src/keys/block_number.rs
@@ -0,0 +1,72 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)]
+pub struct BlockNumberKeyV1(pub BlockNumber);
+
+impl AsBytes for BlockNumberKeyV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        if self.0 == BlockNumber(u32::MAX) {
+            f(b"0000000NaN")
+        } else {
+            f(format!("{:010}", (self.0).0).as_bytes())
+        }
+    }
+}
+
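+// Encoding note: ordinary block numbers are zero-padded to 10 decimal digits,
+// so the backend's lexicographic byte order matches numeric order; `u32::MAX`
+// is reserved and round-trips through the special marker "0000000NaN".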
+impl FromBytes for BlockNumberKeyV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let key_str = std::str::from_utf8(bytes).map_err(|e| CorruptedBytes(e.to_string()))?;
+        if key_str == "0000000NaN" {
+            Ok(BlockNumberKeyV1(BlockNumber(u32::MAX)))
+        } else {
+            Ok(BlockNumberKeyV1(BlockNumber(key_str.parse().map_err(
+                |e| CorruptedBytes(format!("{}: {}", e, key_str)),
+            )?)))
+        }
+    }
+}
+
+impl ToDumpString for BlockNumberKeyV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for BlockNumberKeyV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerKeyErr(e.0.into()))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        Ok(format!("{}", (self.0).0))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use super::*;
+
+    #[test]
+    fn test_block_number_str_10_ser() {
+        BlockNumberKeyV1(BlockNumber(35))
+            .as_bytes(|bytes| assert_eq!(bytes, &[48, 48, 48, 48, 48, 48, 48, 48, 51, 53]))
+    }
+}
diff --git a/dbs/src/keys/blockstamp.rs b/dbs/src/keys/blockstamp.rs
new file mode 100644
index 0000000000000000000000000000000000000000..6b4a8df39c85170914561346d9e6b16349b89f44
--- /dev/null
+++ b/dbs/src/keys/blockstamp.rs
@@ -0,0 +1,100 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)]
+pub struct BlockstampKeyV1(Blockstamp);
+
+impl AsBytes for BlockstampKeyV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(format!("{:010}-{}", self.0.number.0, self.0.hash).as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for BlockstampKeyV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let blockstamp_strs: ArrayVec<[&str; 2]> = std::str::from_utf8(bytes)
+            .map_err(|e| CorruptedBytes(e.to_string()))?
+            .split('-')
+            .collect();
+        let block_number = blockstamp_strs[0]
+            .parse()
+            .map_err(|e: ParseIntError| CorruptedBytes(e.to_string()))?;
+        let block_hash =
+            Hash::from_hex(blockstamp_strs[1]).map_err(|e| CorruptedBytes(e.to_string()))?;
+        Ok(BlockstampKeyV1(Blockstamp {
+            number: BlockNumber(block_number),
+            hash: BlockHash(block_hash),
+        }))
+    }
+}
+
+impl ToDumpString for BlockstampKeyV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for BlockstampKeyV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerKeyErr(e.0.into()))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        Ok(format!("{}", self.0))
+    }
+}
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)]
+pub struct BlockstampKeyV2(Blockstamp);
+
+impl AsBytes for BlockstampKeyV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let bytes: [u8; 36] = self.0.into();
+        f(&bytes[..])
+    }
+}
+
+impl kv_typed::prelude::FromBytes for BlockstampKeyV2 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        use dubp::common::bytes_traits::FromBytes as _;
+        Ok(Self(
+            Blockstamp::from_bytes(bytes).map_err(|e| CorruptedBytes(e.to_string()))?,
+        ))
+    }
+}
+
+impl ToDumpString for BlockstampKeyV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for BlockstampKeyV2 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        Ok(Self(
+            Blockstamp::from_str(source).map_err(|e| FromExplorerKeyErr(e.into()))?,
+        ))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        Ok(format!("{}", self.0))
+    }
+}
diff --git a/dbs/src/keys/dunp_node_id.rs b/dbs/src/keys/dunp_node_id.rs
new file mode 100644
index 0000000000000000000000000000000000000000..31e232d41d7fcda0ddd10ce41a7c673272fc3777
--- /dev/null
+++ b/dbs/src/keys/dunp_node_id.rs
@@ -0,0 +1,111 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+use std::fmt::Display;
+use uninit::prelude::*;
+
+#[derive(
+    Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, zerocopy::AsBytes, zerocopy::FromBytes,
+)]
+#[repr(transparent)]
+pub struct DunpNodeIdV1Db([u8; 37]); // layout: pubkey (33 bytes) ++ uuid (4 bytes, big-endian)
+
+impl DunpNodeIdV1Db {
+    pub fn new(uuid: u32, pubkey: PublicKey) -> Self {
+        let mut buffer = uninit_array![u8; 37];
+        let (pubkey_buffer, uuid_buffer) = buffer.as_out().split_at_out(33);
+
+        pubkey_buffer.copy_from_slice(pubkey.as_ref());
+        uuid_buffer.copy_from_slice(&uuid.to_be_bytes()[..]);
+
+        Self(unsafe { std::mem::transmute(buffer) })
+    }
+    pub fn get_uuid(&self) -> u32 {
+        let mut buffer = uninit_array![u8; 4];
+
+        buffer.as_out().copy_from_slice(&self.0[33..]);
+
+        u32::from_be_bytes(unsafe { std::mem::transmute(buffer) })
+    }
+    pub fn get_pubkey(&self) -> PublicKey {
+        let mut buffer = uninit_array![u8; 33];
+
+        buffer.as_out().copy_from_slice(&self.0[..33]);
+        let bytes: [u8; 33] = unsafe { std::mem::transmute(buffer) };
+
+        PublicKey::try_from(&bytes[..]).unwrap_or_else(|_| unreachable!())
+    }
+}
+
+impl Default for DunpNodeIdV1Db {
+    fn default() -> Self {
+        DunpNodeIdV1Db([0u8; 37])
+    }
+}
+
+impl Display for DunpNodeIdV1Db {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{:x}-{}", self.get_uuid(), self.get_pubkey())
+    }
+}
+
+impl AsBytes for DunpNodeIdV1Db {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(self.0.as_ref())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for DunpNodeIdV1Db {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let layout = zerocopy::LayoutVerified::<_, DunpNodeIdV1Db>::new(bytes)
+            .ok_or_else(|| CorruptedBytes("corrupted db: invalid node id bytes".to_owned()))?;
+        Ok(*layout)
+    }
+}
+
+impl ToDumpString for DunpNodeIdV1Db {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for DunpNodeIdV1Db {
+    fn from_explorer_str(_: &str) -> std::result::Result<Self, FromExplorerKeyErr> {
+        unimplemented!()
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        Ok(self.to_string())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_serde() {
+        let node_id = DunpNodeIdV1Db::new(42, PublicKey::default());
+        assert_eq!(node_id.get_uuid(), 42);
+        assert_eq!(node_id.get_pubkey(), PublicKey::default());
+        let mut node_id_ = DunpNodeIdV1Db([0u8; 37]);
+        node_id_.0[32] = 32;
+        node_id_.0[36] = 42;
+        assert_eq!(node_id_, node_id)
+    }
+}
diff --git a/dbs/src/keys/hash.rs b/dbs/src/keys/hash.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c223810c722cdcc7a4b8ae3ac6089ee464fe4e20
--- /dev/null
+++ b/dbs/src/keys/hash.rs
@@ -0,0 +1,106 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)]
+pub struct HashKeyV1(pub Hash);
+
+impl AsBytes for HashKeyV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(self.0.to_hex().as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for HashKeyV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let hash_str = std::str::from_utf8(bytes).map_err(|e| CorruptedBytes(e.to_string()))?;
+        Ok(HashKeyV1(
+            Hash::from_hex(&hash_str).map_err(|e| CorruptedBytes(e.to_string()))?,
+        ))
+    }
+}
+
+impl ToDumpString for HashKeyV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for HashKeyV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerKeyErr(e.0.into()))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        self.as_bytes(|bytes| Ok(unsafe { std::str::from_utf8_unchecked(bytes) }.to_owned()))
+    }
+}
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)]
+#[repr(transparent)]
+pub struct HashKeyV2(pub Hash);
+
+impl HashKeyV2 {
+    pub fn from_ref(hash: &Hash) -> &Self {
+        #[allow(trivial_casts)]
+        unsafe {
+            &*(hash as *const Hash as *const HashKeyV2)
+        }
+    }
+}
+
+impl AsBytes for HashKeyV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(self.0.as_ref())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for HashKeyV2 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        if bytes.len() != 32 {
+            Err(CorruptedBytes(format!(
+                "Invalid length: expected 32 found {}",
+                bytes.len()
+            )))
+        } else {
+            let mut buffer = [0u8; 32];
+            buffer.copy_from_slice(bytes);
+            Ok(HashKeyV2(Hash(buffer)))
+        }
+    }
+}
+
+impl ToDumpString for HashKeyV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for HashKeyV2 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        Ok(Self(
+            Hash::from_hex(source).map_err(|e| FromExplorerKeyErr(e.into()))?,
+        ))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        Ok(self.0.to_hex())
+    }
+}
diff --git a/dbs/src/keys/pubkey.rs b/dbs/src/keys/pubkey.rs
new file mode 100644
index 0000000000000000000000000000000000000000..7441f51303db6acfd6c629c2ef269b5efd8d997d
--- /dev/null
+++ b/dbs/src/keys/pubkey.rs
@@ -0,0 +1,117 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)]
+pub struct PubKeyKeyV1(pub PublicKey);
+
+impl PubKeyKeyV1 {
+    const ALL: &'static str = "ALL";
+    const ALL_WITH_LEADING_1: &'static str = "11111111111111111111111111111ALL";
+
+    pub fn all() -> Self {
+        Self(PublicKey::from_base58(Self::ALL).expect("invalid PubKeyKeyV1::all()"))
+    }
+}
+
+impl AsBytes for PubKeyKeyV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let b58_string = self.0.to_base58();
+        if b58_string == Self::ALL_WITH_LEADING_1 {
+            f(Self::ALL.as_bytes())
+        } else {
+            f(b58_string.as_bytes())
+        }
+    }
+}
+
+impl kv_typed::prelude::FromBytes for PubKeyKeyV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let pubkey_str = std::str::from_utf8(bytes).map_err(|e| CorruptedBytes(e.to_string()))?;
+        Ok(PubKeyKeyV1(PublicKey::from_base58(&pubkey_str).map_err(
+            |e| CorruptedBytes(format!("{}: {}", e, pubkey_str)),
+        )?))
+    }
+}
+
+impl ToDumpString for PubKeyKeyV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)]
+pub struct PubKeyKeyV2(pub PublicKey);
+
+impl AsBytes for PubKeyKeyV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(self.0.as_ref())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for PubKeyKeyV2 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        Ok(PubKeyKeyV2(PublicKey::try_from(bytes).map_err(|e| {
+            CorruptedBytes(format!("{}: {:?}", e, bytes))
+        })?))
+    }
+}
+
+impl ToDumpString for PubKeyKeyV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for PubKeyKeyV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerKeyErr(e.0.into()))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        self.as_bytes(|bytes| Ok(unsafe { std::str::from_utf8_unchecked(bytes) }.to_owned()))
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for PubKeyKeyV2 {
+    fn from_explorer_str(pubkey_str: &str) -> std::result::Result<Self, FromExplorerKeyErr> {
+        Ok(PubKeyKeyV2(PublicKey::from_base58(&pubkey_str).map_err(
+            |e| FromExplorerKeyErr(format!("{}: {}", e, pubkey_str).into()),
+        )?))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        Ok(self.0.to_base58())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use super::*;
+    #[test]
+    fn pubkey_all() {
+        let all = PubKeyKeyV1::all();
+        assert_eq!(
+            all.as_bytes(|bytes| bytes.to_vec()),
+            PubKeyKeyV1::ALL.as_bytes()
+        )
+    }
+}
diff --git a/dbs/src/keys/pubkey_and_sig.rs b/dbs/src/keys/pubkey_and_sig.rs
new file mode 100644
index 0000000000000000000000000000000000000000..47a14c90c0e2ec5805cd09cc272ba04c5c519d9f
--- /dev/null
+++ b/dbs/src/keys/pubkey_and_sig.rs
@@ -0,0 +1,69 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+pub struct PubKeyAndSigV1(PublicKey, Signature);
+
+impl PubKeyAndSigV1 {
+    pub fn all() -> Self {
+        Self(PublicKey::default(), Signature([0u8; 64]))
+    }
+}
+
+impl AsBytes for PubKeyAndSigV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        if self == &Self::all() {
+            f(b"ALL")
+        } else {
+            f(format!("{}:{}", self.0.to_base58(), self.1.to_base64()).as_bytes())
+        }
+    }
+}
+
+impl kv_typed::prelude::FromBytes for PubKeyAndSigV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let raw_str = std::str::from_utf8(bytes).map_err(|e| CorruptedBytes(e.to_string()))?;
+        if raw_str == "ALL" {
+            Ok(PubKeyAndSigV1::all())
+        } else {
+            let array_str: ArrayVec<[&str; 2]> = raw_str.split(':').collect();
+            let pubkey =
+                PublicKey::from_base58(array_str[0]).map_err(|e| CorruptedBytes(e.to_string()))?;
+            let sig =
+                Signature::from_base64(array_str[1]).map_err(|e| CorruptedBytes(e.to_string()))?;
+            Ok(PubKeyAndSigV1(pubkey, sig))
+        }
+    }
+}
+
+impl ToDumpString for PubKeyAndSigV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for PubKeyAndSigV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerKeyErr(e.0.into()))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        self.as_bytes(|bytes| Ok(unsafe { std::str::from_utf8_unchecked(bytes) }.to_owned()))
+    }
+}
diff --git a/dbs/src/keys/source_key.rs b/dbs/src/keys/source_key.rs
new file mode 100644
index 0000000000000000000000000000000000000000..accb5c9a972212a9f767102bdcfc83a1c724b7ff
--- /dev/null
+++ b/dbs/src/keys/source_key.rs
@@ -0,0 +1,93 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, PartialOrd, Serialize)]
+pub struct SourceKeyV1 {
+    pub tx_hash: Hash,
+    pub pos: u32,
+    pub consumed: Option<bool>,
+}
+
+impl ToString for SourceKeyV1 {
+    fn to_string(&self) -> String {
+        format!(
+            "{}-{:010}{}",
+            self.tx_hash,
+            self.pos,
+            match self.consumed {
+                Some(true) => "-1",
+                Some(false) => "-0",
+                None => "",
+            }
+        )
+    }
+}
+
+impl AsBytes for SourceKeyV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(self.to_string().as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for SourceKeyV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let strs: ArrayVec<[&str; 3]> = std::str::from_utf8(bytes)
+            .map_err(|e| CorruptedBytes(e.to_string()))?
+            .split('-')
+            .collect();
+        let tx_hash = Hash::from_hex(strs[0]).map_err(|e| CorruptedBytes(e.to_string()))?;
+        let pos = strs[1]
+            .parse()
+            .map_err(|e: ParseIntError| CorruptedBytes(e.to_string()))?;
+        let consumed = if strs.len() <= 2 {
+            None
+        } else {
+            match strs[2] {
+                "1" => Some(true),
+                "0" => Some(false),
+                _ => {
+                    return Err(CorruptedBytes(
+                        "invalid format: field consumed must be encoded with '0' or '1'".to_owned(),
+                    ))
+                }
+            }
+        };
+        Ok(SourceKeyV1 {
+            tx_hash,
+            pos,
+            consumed,
+        })
+    }
+}
+
+impl ToDumpString for SourceKeyV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for SourceKeyV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerKeyErr(e.0.into()))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        self.as_bytes(|bytes| Ok(unsafe { std::str::from_utf8_unchecked(bytes) }.to_owned()))
+    }
+}
diff --git a/dbs/src/keys/timestamp.rs b/dbs/src/keys/timestamp.rs
new file mode 100644
index 0000000000000000000000000000000000000000..96a6cb8ced60be558084bfa65b3edd85cb7b1e6f
--- /dev/null
+++ b/dbs/src/keys/timestamp.rs
@@ -0,0 +1,56 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)]
+pub struct TimestampKeyV1(pub u64);
+
+impl AsBytes for TimestampKeyV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(format!("{}", self.0).as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for TimestampKeyV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let key_str = std::str::from_utf8(bytes).map_err(|e| CorruptedBytes(e.to_string()))?;
+        Ok(TimestampKeyV1(key_str.parse().map_err(|e| {
+            CorruptedBytes(format!("{}: {}", e, key_str))
+        })?))
+    }
+}
+
+impl ToDumpString for TimestampKeyV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for TimestampKeyV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        NaiveDateTime::parse_from_str(source, "%Y-%m-%d %H:%M:%S")
+            .map(|dt| TimestampKeyV1(dt.timestamp() as u64))
+            .map_err(|e| FromExplorerKeyErr(format!("{}: {}", e, source).into()))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        Ok(NaiveDateTime::from_timestamp(self.0 as i64, 0)
+            .format("%Y-%m-%d %H:%M:%S")
+            .to_string())
+    }
+}
diff --git a/dbs/src/keys/ud_id.rs b/dbs/src/keys/ud_id.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a1e3b24ce68119268899cae63aecba6be0147ad8
--- /dev/null
+++ b/dbs/src/keys/ud_id.rs
@@ -0,0 +1,123 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+use uninit::prelude::*;
+
+#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
+pub struct UdIdV2(pub PublicKey, pub BlockNumber);
+
+impl PartialOrd for UdIdV2 {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        match self.0.partial_cmp(&other.0) {
+            Some(std::cmp::Ordering::Equal) => self.1.partial_cmp(&other.1),
+            o => o,
+        }
+    }
+}
+impl Ord for UdIdV2 {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        match self.0.cmp(&other.0) {
+            std::cmp::Ordering::Equal => self.1.cmp(&other.1),
+            o => o,
+        }
+    }
+}
+
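+// Key layout: the 33 public-key bytes followed by the block number as a 4-byte
+// big-endian integer (37 bytes total); `as_bytes` fills the two halves of one
+// uninitialized buffer and hands the full 37-byte slice to the caller.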
+impl AsBytes for UdIdV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let mut buffer = uninit_array![u8; 37];
+        let (pubkey_buffer, block_number_buffer) = buffer.as_out().split_at_out(33);
+        let pubkey_buffer = pubkey_buffer.copy_from_slice(self.0.as_ref());
+        block_number_buffer.copy_from_slice(&(self.1).0.to_be_bytes());
+        f(unsafe { std::slice::from_raw_parts_mut(pubkey_buffer.as_mut_ptr(), 37) })
+    }
+}
+
+impl FromBytes for UdIdV2 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let pubkey = PublicKey::try_from(&bytes[..33])
+            .map_err(|e| CorruptedBytes(format!("{}: {:?}", e, bytes)))?;
+        let block_number = BlockNumber(
+            zerocopy::LayoutVerified::<_, zerocopy::U32<byteorder::BigEndian>>::new(&bytes[33..])
+                .ok_or_else(|| {
+                    CorruptedBytes(
+                        "Corrupted DB: BlockNumber bytes are invalid length or unaligned"
+                            .to_owned(),
+                    )
+                })?
+                .get(),
+        );
+        Ok(UdIdV2(pubkey, block_number))
+    }
+}
+
+impl ToDumpString for UdIdV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for UdIdV2 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        let mut source = source.split(':');
+        if let Some(pubkey_str) = source.next() {
+            let pubkey = PublicKey::from_base58(&pubkey_str)
+                .map_err(|e| FromExplorerKeyErr(format!("{}: {}", e, pubkey_str).into()))?;
+            if let Some(block_number_str) = source.next() {
+                Ok(UdIdV2(
+                    pubkey,
+                    BlockNumber::from_str(block_number_str)
+                        .map_err(|e| FromExplorerKeyErr(e.into()))?,
+                ))
+            } else {
+                Err(FromExplorerKeyErr("UdIdV2: Invalid format".into()))
+            }
+        } else {
+            Err(FromExplorerKeyErr("UdIdV2: Invalid format".into()))
+        }
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        Ok(format!("{}:{}", self.0.to_base58(), (self.1).0))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn ud_id_v2_as_bytes() -> anyhow::Result<()> {
+        let ud_id = UdIdV2(PublicKey::default(), BlockNumber(3));
+
+        let ud_id_2_res = ud_id.as_bytes(|bytes| {
+            assert_eq!(
+                bytes,
+                [
+                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                    0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 3
+                ]
+            );
+            UdIdV2::from_bytes(bytes)
+        });
+
+        assert_eq!(ud_id_2_res?, ud_id);
+
+        Ok(())
+    }
+}
diff --git a/dbs/src/keys/uid.rs b/dbs/src/keys/uid.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a5e4bd7dde0f5cb8642dbb2987a4d10b9db8feae
--- /dev/null
+++ b/dbs/src/keys/uid.rs
@@ -0,0 +1,65 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+const USERNAME_MAX_LEN: usize = 100;
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)]
+pub struct UidKeyV1(pub ArrayString<[u8; USERNAME_MAX_LEN]>);
+
+impl AsBytes for UidKeyV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(self.0.as_str().as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for UidKeyV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let uid_str = std::str::from_utf8(bytes).map_err(|e| CorruptedBytes(e.to_string()))?;
+        Ok(Self(
+            ArrayString::<[u8; USERNAME_MAX_LEN]>::from_str(uid_str)
+                .map_err(|e| CorruptedBytes(e.to_string()))?,
+        ))
+    }
+}
+
+impl FromStr for UidKeyV1 {
+    type Err = arrayvec::CapacityError;
+
+    fn from_str(source: &str) -> std::result::Result<Self, Self::Err> {
+        Ok(UidKeyV1(ArrayString::<[u8; USERNAME_MAX_LEN]>::from_str(
+            source,
+        )?))
+    }
+}
+
+impl ToDumpString for UidKeyV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for UidKeyV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerKeyErr(e.0.into()))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        self.as_bytes(|bytes| Ok(unsafe { std::str::from_utf8_unchecked(bytes) }.to_owned()))
+    }
+}
diff --git a/dbs/src/keys/utxo_id.rs b/dbs/src/keys/utxo_id.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ca57bf0cbe3534eb83110a6bef29d694310711a1
--- /dev/null
+++ b/dbs/src/keys/utxo_id.rs
@@ -0,0 +1,124 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+use uninit::prelude::*;
+
+type OutputIndex = u32;
+
+#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
+pub struct UtxoIdDbV2(pub Hash, pub OutputIndex);
+
+impl PartialOrd for UtxoIdDbV2 {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        match self.0.partial_cmp(&other.0) {
+            Some(std::cmp::Ordering::Equal) => self.1.partial_cmp(&other.1),
+            o => o,
+        }
+    }
+}
+impl Ord for UtxoIdDbV2 {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        match self.0.cmp(&other.0) {
+            std::cmp::Ordering::Equal => self.1.cmp(&other.1),
+            o => o,
+        }
+    }
+}
+
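+// Key layout: the 32 hash bytes followed by the output index as a 4-byte
+// big-endian integer (36 bytes total), matching the expected bytes in the
+// `utxo_id_v2_as_bytes` test below.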
+impl AsBytes for UtxoIdDbV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let mut buffer = uninit_array![u8; 36];
+        let (hash_buffer, index_buffer) = buffer.as_out().split_at_out(32);
+        let hash_buffer = hash_buffer.copy_from_slice(self.0.as_ref());
+        index_buffer.copy_from_slice(&(self.1).to_be_bytes());
+        f(unsafe { std::slice::from_raw_parts_mut(hash_buffer.as_mut_ptr(), 36) })
+    }
+}
+
+impl FromBytes for UtxoIdDbV2 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let hash = zerocopy::LayoutVerified::<_, Hash>::new(&bytes[..32]).ok_or_else(|| {
+            CorruptedBytes("Corrupted DB: Hash bytes are invalid length or unaligned".to_owned())
+        })?;
+        let output_index =
+            zerocopy::LayoutVerified::<_, zerocopy::U32<byteorder::BigEndian>>::new(&bytes[32..])
+                .ok_or_else(|| {
+                    CorruptedBytes(
+                        "Corrupted DB: OutputIndex bytes are invalid length or unaligned"
+                            .to_owned(),
+                    )
+                })?
+                .get();
+        Ok(UtxoIdDbV2(*hash, output_index))
+    }
+}
+
+impl ToDumpString for UtxoIdDbV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for UtxoIdDbV2 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        let mut source = source.split(':');
+        if let Some(hash_str) = source.next() {
+            let hash = Hash::from_hex(&hash_str)
+                .map_err(|e| FromExplorerKeyErr(format!("{}: {}", e, hash_str).into()))?;
+            if let Some(output_index_str) = source.next() {
+                Ok(UtxoIdDbV2(
+                    hash,
+                    u32::from_str(output_index_str).map_err(|e| FromExplorerKeyErr(e.into()))?,
+                ))
+            } else {
+                Err(FromExplorerKeyErr("UtxoIdDbV2: Invalid format".into()))
+            }
+        } else {
+            Err(FromExplorerKeyErr("UtxoIdDbV2: Invalid format".into()))
+        }
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        Ok(format!("{}:{}", self.0.to_hex(), self.1))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn utxo_id_v2_as_bytes() -> anyhow::Result<()> {
+        let utxo_id = UtxoIdDbV2(Hash::default(), 3);
+
+        let utxo_id_2_res = utxo_id.as_bytes(|bytes| {
+            assert_eq!(
+                bytes,
+                [
+                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                    0, 0, 0, 0, 0, 0, 0, 0, 0, 3
+                ]
+            );
+            UtxoIdDbV2::from_bytes(bytes)
+        });
+
+        assert_eq!(utxo_id_2_res?, utxo_id);
+
+        Ok(())
+    }
+}
diff --git a/dbs/src/keys/wallet_conditions.rs b/dbs/src/keys/wallet_conditions.rs
new file mode 100644
index 0000000000000000000000000000000000000000..56af52a2a82b95555acf4934bff7743badcd56bb
--- /dev/null
+++ b/dbs/src/keys/wallet_conditions.rs
@@ -0,0 +1,112 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+const CONDITIONS_MAX_LEN: usize = 256;
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd)]
+pub struct WalletConditionsV1(pub ArrayString<[u8; CONDITIONS_MAX_LEN]>);
+
+impl AsBytes for WalletConditionsV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(self.0.as_str().as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for WalletConditionsV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let uid_str = std::str::from_utf8(bytes).map_err(|e| CorruptedBytes(e.to_string()))?;
+        Ok(Self(
+            ArrayString::<[u8; CONDITIONS_MAX_LEN]>::from_str(uid_str)
+                .map_err(|e| CorruptedBytes(e.to_string()))?,
+        ))
+    }
+}
+
+impl FromStr for WalletConditionsV1 {
+    type Err = arrayvec::CapacityError;
+
+    fn from_str(source: &str) -> std::result::Result<Self, Self::Err> {
+        Ok(WalletConditionsV1(
+            ArrayString::<[u8; CONDITIONS_MAX_LEN]>::from_str(source)?,
+        ))
+    }
+}
+
+impl ToDumpString for WalletConditionsV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for WalletConditionsV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerKeyErr(e.0.into()))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        self.as_bytes(|bytes| Ok(unsafe { std::str::from_utf8_unchecked(bytes) }.to_owned()))
+    }
+}
+
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+pub struct WalletConditionsV2(pub WalletScriptV10);
+
+impl WalletConditionsV2 {
+    pub fn from_ref(script: &WalletScriptV10) -> &Self {
+        #[allow(trivial_casts)]
+        unsafe {
+            &*(script as *const WalletScriptV10 as *const WalletConditionsV2)
+        }
+    }
+}
+
+impl AsBytes for WalletConditionsV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let mut buffer = SmallVec::<[u8; 256]>::new();
+        bincode::serialize_into(&mut buffer, &self.0).unwrap_or_else(|_| unreachable!());
+        f(buffer.as_ref())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for WalletConditionsV2 {
+    type Err = bincode::Error;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        Ok(Self(bincode::deserialize(bytes)?))
+    }
+}
+
+impl ToDumpString for WalletConditionsV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableKey for WalletConditionsV2 {
+    fn from_explorer_str(s: &str) -> std::result::Result<Self, FromExplorerKeyErr> {
+        Ok(Self(
+            dubp::documents_parser::wallet_script_from_str(s)
+                .map_err(|e| FromExplorerKeyErr(e.into()))?,
+        ))
+    }
+    fn to_explorer_string(&self) -> KvResult<String> {
+        Ok(self.0.to_string())
+    }
+}
diff --git a/dbs/src/lib.rs b/dbs/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..63d8a82ad9b4898917bd8ab75e75c18f5ec0bdfb
--- /dev/null
+++ b/dbs/src/lib.rs
@@ -0,0 +1,144 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+#![allow(clippy::upper_case_acronyms)]
+#![deny(
+    clippy::unwrap_used,
+    missing_copy_implementations,
+    trivial_casts,
+    trivial_numeric_casts,
+    unstable_features,
+    unused_import_braces
+)]
+
+pub mod databases;
+mod keys;
+mod open_dbs;
+mod values;
+
+// Re-export dependencies
+pub use arrayvec;
+#[cfg(feature = "explorer")]
+pub use kv_typed::regex;
+pub use serde;
+pub use serde_json;
+pub use smallvec;
+
+// Re-export crates
+pub use kv_typed;
+
+// Prelude
+pub mod prelude {
+    pub use crate::open_dbs::BackendConf;
+    pub use crate::SharedDbs;
+    #[cfg(feature = "explorer")]
+    pub use kv_typed::explorer::{
+        DbExplorable, EntryFound, ExplorerAction, ExplorerActionResponse, ValueCaptures,
+    };
+}
+
+// Export technical types and functions
+pub use crate::open_dbs::open_dbs;
+
+// Export domain types
+pub use crate::keys::utxo_id::UtxoIdDbV2;
+pub use keys::all::AllKeyV1;
+pub use keys::block_number::BlockNumberKeyV1;
+pub use keys::blockstamp::{BlockstampKeyV1, BlockstampKeyV2};
+pub use keys::dunp_node_id::DunpNodeIdV1Db;
+pub use keys::hash::{HashKeyV1, HashKeyV2};
+pub use keys::pubkey::{PubKeyKeyV1, PubKeyKeyV2};
+pub use keys::pubkey_and_sig::PubKeyAndSigV1;
+pub use keys::source_key::SourceKeyV1;
+pub use keys::timestamp::TimestampKeyV1;
+pub use keys::ud_id::UdIdV2;
+pub use keys::uid::UidKeyV1;
+pub use keys::wallet_conditions::{WalletConditionsV1, WalletConditionsV2};
+pub use values::block_db::{BlockDbEnum, BlockDbV1, BlockDbV2, TransactionInBlockDbV1};
+pub use values::block_head_db::BlockHeadDbV1;
+pub use values::block_meta::BlockMetaV2;
+pub use values::block_number_array_db::BlockNumberArrayV1;
+pub use values::cindex_db::CIndexDbV1;
+pub use values::dunp_head::DunpHeadDbV1;
+pub use values::idty_db::IdtyDbV2;
+pub use values::iindex_db::IIndexDbV1;
+pub use values::kick_db::KickDbV1;
+pub use values::mindex_db::MIndexDbV1;
+pub use values::peer_card::PeerCardDbV1;
+pub use values::pubkey_db::{PubKeyValV2, PublicKeyArrayDbV1, PublicKeySingletonDbV1};
+pub use values::sindex_db::{SIndexDBV1, SourceKeyArrayDbV1};
+pub use values::source_amount::SourceAmountValV2;
+pub use values::tx_db::PendingTxDbV2;
+pub use values::txs::BlockTxsDbV2;
+pub use values::ud_entry_db::{ConsumedUdDbV1, UdAmountDbV1, UdEntryDbV1};
+pub use values::utxo::{BlockUtxosV2Db, UtxoValV2};
+pub use values::wallet_db::WalletDbV1;
+pub use values::wallet_script_with_sa::WalletScriptWithSourceAmountV1Db;
+
+// Crate imports
+pub(crate) use arrayvec::{ArrayString, ArrayVec};
+#[cfg(feature = "explorer")]
+use chrono::NaiveDateTime;
+pub(crate) use dubp::common::crypto::bases::b58::ToBase58 as _;
+pub(crate) use dubp::common::crypto::bases::BaseConversionError;
+pub(crate) use dubp::common::crypto::hashs::Hash;
+pub(crate) use dubp::common::crypto::keys::ed25519::{PublicKey, Signature};
+pub(crate) use dubp::common::crypto::keys::{PublicKey as _, Signature as _};
+pub(crate) use dubp::common::prelude::*;
+pub(crate) use dubp::documents::dubp_wallet::prelude::*;
+pub(crate) use kv_typed::db_schema;
+pub(crate) use kv_typed::prelude::*;
+pub(crate) use serde::{Deserialize, Serialize};
+pub(crate) use smallvec::SmallVec;
+pub(crate) use std::{
+    collections::BTreeSet, convert::TryFrom, fmt::Debug, iter::Iterator, num::ParseIntError,
+    path::Path, str::FromStr,
+};
+
+#[derive(Debug, Error)]
+#[error("{0}")]
+pub struct CorruptedBytes(pub String);
+
+pub trait ToDumpString {
+    fn to_dump_string(&self) -> String;
+}
+
+#[cfg(all(not(feature = "mem"), not(test)))]
+pub type FileBackend = kv_typed::backend::sled::Sled;
+#[cfg(any(feature = "mem", test))]
+pub type FileBackend = kv_typed::backend::memory::Mem;
+
+#[derive(Clone, Debug)]
+pub struct SharedDbs<B: Backend> {
+    pub bc_db_ro: databases::bc_v2::BcV2DbRo<B>,
+    pub cm_db: databases::cm_v1::CmV1Db<MemSingleton>,
+    pub dunp_db: databases::network_v1::NetworkV1Db<B>,
+    pub txs_mp_db: databases::txs_mp_v2::TxsMpV2Db<B>,
+}
+
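+// Minimal usage sketch (illustrative): tests can obtain throw-away databases with
+// `let dbs = SharedDbs::mem()?;` and go through e.g. `dbs.txs_mp_db`, while a node
+// opens persistent databases via `open_dbs::<FileBackend>(Some(profile_path))`,
+// which returns the writable `BcV2Db` together with a `SharedDbs` holding only a
+// read-only handle to it (`bc_db_ro`).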
+impl SharedDbs<Mem> {
+    pub fn mem() -> KvResult<Self> {
+        use databases::bc_v2::BcV2DbWritable as _;
+        use databases::cm_v1::CmV1DbWritable as _;
+        use databases::network_v1::NetworkV1DbWritable as _;
+        use databases::txs_mp_v2::TxsMpV2DbWritable as _;
+        Ok(SharedDbs {
+            bc_db_ro: databases::bc_v2::BcV2Db::<Mem>::open(MemConf::default())?.get_ro_handler(),
+            cm_db: databases::cm_v1::CmV1Db::<MemSingleton>::open(MemSingletonConf::default())?,
+            dunp_db: databases::network_v1::NetworkV1Db::<Mem>::open(MemConf::default())?,
+            txs_mp_db: databases::txs_mp_v2::TxsMpV2Db::<Mem>::open(MemConf::default())?,
+        })
+    }
+}
diff --git a/dbs/src/open_dbs.rs b/dbs/src/open_dbs.rs
new file mode 100644
index 0000000000000000000000000000000000000000..05f1ffd1b12fd46a533a93eb19e69b5b447d96be
--- /dev/null
+++ b/dbs/src/open_dbs.rs
@@ -0,0 +1,109 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::databases::bc_v2::BcV2DbWritable as _;
+use crate::databases::cm_v1::CmV1DbWritable as _;
+use crate::databases::network_v1::NetworkV1DbWritable as _;
+use crate::databases::txs_mp_v2::TxsMpV2DbWritable as _;
+use crate::*;
+
+pub fn open_dbs<B: BackendConf>(
+    profile_path_opt: Option<&Path>,
+) -> KvResult<(crate::databases::bc_v2::BcV2Db<B>, SharedDbs<B>)> {
+    let bc_db = crate::databases::bc_v2::BcV2Db::<B>::open(B::gen_backend_conf(
+        crate::databases::bc_v2::BcV2Db::<B>::NAME,
+        profile_path_opt,
+    ))?;
+    let dbs = SharedDbs {
+        bc_db_ro: bc_db.get_ro_handler(),
+        cm_db: crate::databases::cm_v1::CmV1Db::<MemSingleton>::open(MemSingletonConf::default())
+            .expect("fail to open CmV1 DB"),
+        dunp_db: crate::databases::network_v1::NetworkV1Db::<B>::open(B::gen_backend_conf(
+            "dunp_v1",
+            profile_path_opt,
+        ))?,
+        txs_mp_db: crate::databases::txs_mp_v2::TxsMpV2Db::<B>::open(B::gen_backend_conf(
+            crate::databases::txs_mp_v2::TxsMpV2Db::<B>::NAME,
+            profile_path_opt,
+        ))?,
+    };
+    Ok((bc_db, dbs))
+}
+
+pub trait BackendConf: Backend {
+    fn gen_backend_conf(
+        db_name: &'static str,
+        profile_path_opt: Option<&Path>,
+    ) -> <Self as Backend>::Conf;
+}
+
+impl BackendConf for Mem {
+    #[inline(always)]
+    fn gen_backend_conf(_db_name: &'static str, _profile_path_opt: Option<&Path>) -> MemConf {
+        MemConf::default()
+    }
+}
+
+/*impl BackendConf for Lmdb {
+    #[inline(always)]
+    fn gen_backend_conf(db_name: &'static str, profile_path_opt: Option<&Path>) -> LmdbConf {
+        let conf = LmdbConf::default();
+        if let Some(data_path) = profile_path_opt {
+            conf.folder_path(data_path.join(format!("data/{}_lmdb", db_name)))
+        } else {
+            let random = rand::random::<u128>();
+            conf.folder_path(PathBuf::from(format!(
+                "/dev/shm/duniter/_{}/{}_lmdb",
+                random, db_name
+            )))
+            .temporary(true)
+        }
+    }
+}*/
+
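+// Sled configuration: each database is flushed every 10 seconds, stored under
+// `<profile>/data/<db_name>_sled` when a profile path is given (temporary
+// otherwise), and compression is opt-in per database through the
+// `DUNITER_BC_DB_COMPRESSION` / `DUNITER_GVA_DB_COMPRESSION` environment
+// variables (levels 1 to 22).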
+impl BackendConf for Sled {
+    #[inline(always)]
+    fn gen_backend_conf(db_name: &'static str, profile_path_opt: Option<&Path>) -> SledConf {
+        let mut conf = SledConf::default().flush_every_ms(Some(10_000));
+        conf = match db_name {
+            "bc_v2" => {
+                if let Ok(compression_level) = std::env::var("DUNITER_BC_DB_COMPRESSION") {
+                    conf.use_compression(true)
+                        .compression_factor(i32::from_str(&compression_level).expect(
+                        "Env var DUNITER_BC_DB_COMPRESSION must be a number beetween 1 and 22 !",
+                    ))
+                } else {
+                    conf.use_compression(false)
+                }
+            }
+            "gva_v1" => {
+                if let Ok(compression_level) = std::env::var("DUNITER_GVA_DB_COMPRESSION") {
+                    conf.use_compression(true)
+                        .compression_factor(i32::from_str(&compression_level).expect(
+                        "Env var DUNITER_GVA_DB_COMPRESSION must be a number beetween 1 and 22 !",
+                    ))
+                } else {
+                    conf.use_compression(false)
+                }
+            }
+            _ => conf.use_compression(false),
+        };
+        if let Some(data_path) = profile_path_opt {
+            conf.path(data_path.join(format!("data/{}_sled", db_name)))
+        } else {
+            conf.temporary(true)
+        }
+    }
+}
diff --git a/dbs/src/values.rs b/dbs/src/values.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a77cb7fd247c3dd9d085da764bc323ca733eb8f2
--- /dev/null
+++ b/dbs/src/values.rs
@@ -0,0 +1,35 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+pub mod block_db;
+pub mod block_head_db;
+pub mod block_meta;
+pub mod block_number_array_db;
+pub mod cindex_db;
+pub mod dunp_head;
+pub mod idty_db;
+pub mod iindex_db;
+pub mod kick_db;
+pub mod mindex_db;
+pub mod peer_card;
+pub mod pubkey_db;
+pub mod sindex_db;
+pub mod source_amount;
+pub mod tx_db;
+pub mod txs;
+pub mod ud_entry_db;
+pub mod utxo;
+pub mod wallet_db;
+pub mod wallet_script_with_sa;
diff --git a/dbs/src/values/block_db.rs b/dbs/src/values/block_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c288cda97974ec5f519938f1b5bc76a813f8cdd7
--- /dev/null
+++ b/dbs/src/values/block_db.rs
@@ -0,0 +1,152 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Debug)]
+pub enum BlockDbEnum {
+    BlockDbV1(BlockDbV1),
+}
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BlockDbV1 {
+    pub version: u64,
+    pub number: u64,
+    pub currency: String,
+    pub hash: String,
+    pub signature: String,
+    #[serde(rename = "inner_hash")]
+    pub inner_hash: String,
+    pub previous_hash: Option<String>,
+    pub issuer: String,
+    pub previous_issuer: Option<String>,
+    pub time: u64,
+    pub pow_min: u64,
+    #[serde(rename = "unitbase")]
+    pub unit_base: u64,
+    pub members_count: u64,
+    pub issuers_count: u64,
+    pub issuers_frame: u64,
+    pub issuers_frame_var: i64,
+    pub identities: Vec<String>,
+    pub joiners: Vec<String>,
+    pub actives: Vec<String>,
+    pub leavers: Vec<String>,
+    pub revoked: Vec<String>,
+    pub excluded: Vec<String>,
+    pub certifications: Vec<String>,
+    pub transactions: Vec<TransactionInBlockDbV1>,
+    pub median_time: u64,
+    pub nonce: u64,
+    pub fork: bool,
+    pub parameters: String,
+    pub monetary_mass: u64,
+    pub dividend: Option<u64>,
+    #[serde(rename = "UDTime")]
+    pub ud_time: Option<u64>,
+    #[serde(rename = "writtenOn")]
+    pub written_on: Option<u64>,
+    #[serde(rename = "written_on")]
+    pub written_on_str: String,
+    pub wrong: bool,
+}
+
+impl AsBytes for BlockDbV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let json = serde_json::to_string(self).expect("unreachable");
+        f(json.as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for BlockDbV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let json_str = std::str::from_utf8(bytes).expect("corrupted db: invalid utf8 bytes");
+        serde_json::from_str(&json_str)
+            .map_err(|e| CorruptedBytes(format!("{}: '{}'", e, json_str)))
+    }
+}
+
+impl ToDumpString for BlockDbV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for BlockDbV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerValueErr(e.0.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct TransactionInBlockDbV1 {
+    version: u64,
+    currency: String,
+    #[serde(rename = "locktime")]
+    lock_time: u64,
+    hash: Option<String>,
+    blockstamp: String,
+    blockstamp_time: u64,
+    issuers: SmallVec<[String; 1]>,
+    inputs: SmallVec<[String; 4]>,
+    outputs: SmallVec<[String; 2]>,
+    unlocks: SmallVec<[String; 4]>,
+    signatures: SmallVec<[String; 1]>,
+    comment: String,
+}
+
+// V2
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+pub struct BlockDbV2(pub dubp::block::DubpBlockV10);
+
+impl AsBytes for BlockDbV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let bytes = bincode::serialize(self).unwrap_or_else(|_| unreachable!());
+        f(bytes.as_ref())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for BlockDbV2 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        bincode::deserialize(&bytes).map_err(|e| CorruptedBytes(format!("{}: '{:?}'", e, bytes)))
+    }
+}
+
+impl ToDumpString for BlockDbV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for BlockDbV2 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        serde_json::from_str(source).map_err(|e| FromExplorerValueErr(e.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
diff --git a/dbs/src/values/block_head_db.rs b/dbs/src/values/block_head_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..97c3f6a04481fe4aad4bb1fe7e7bcbd431896835
--- /dev/null
+++ b/dbs/src/values/block_head_db.rs
@@ -0,0 +1,85 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BlockHeadDbV1 {
+    pub version: u64,
+    pub currency: Option<String>,
+    #[serde(rename = "bsize")]
+    pub block_size: u64,
+    pub avg_block_size: u64,
+    pub ud_time: u64,
+    pub ud_reeval_time: u64,
+    pub mass_reeval: u64,
+    pub mass: u64,
+    pub hash: String,
+    pub previous_hash: Option<String>,
+    pub previous_issuer: Option<String>,
+    pub issuer: String,
+    pub time: u64,
+    pub median_time: u64,
+    pub number: u64,
+    pub pow_min: u64,
+    pub diff_number: u64,
+    pub issuers_count: u64,
+    pub issuers_frame: u64,
+    pub issuers_frame_var: i64,
+    pub issuer_diff: u64,
+    pub pow_zeros: u64,
+    pub pow_remainder: u64,
+    pub speed: f64,
+    pub unit_base: u64,
+    pub members_count: u64,
+    pub dividend: u64,
+    #[serde(rename = "new_dividend")]
+    pub new_dividend: Option<u64>,
+    pub issuer_is_member: bool,
+}
+
+impl AsBytes for BlockHeadDbV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let json = serde_json::to_string(self).unwrap_or_else(|_| unreachable!());
+        f(json.as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for BlockHeadDbV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let json_str = std::str::from_utf8(bytes).expect("corrupted db: invalid utf8 bytes");
+        serde_json::from_str(&json_str)
+            .map_err(|e| CorruptedBytes(format!("{}: '{}'", e, json_str)))
+    }
+}
+
+impl ToDumpString for BlockHeadDbV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for BlockHeadDbV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerValueErr(e.0.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
diff --git a/dbs/src/values/block_meta.rs b/dbs/src/values/block_meta.rs
new file mode 100644
index 0000000000000000000000000000000000000000..949a644d963971933e917f129aa9adb09dcd9f30
--- /dev/null
+++ b/dbs/src/values/block_meta.rs
@@ -0,0 +1,133 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use dubp::block::DubpBlockV10;
+
+use crate::*;
+
+const BLOCK_META_SERIALIZED_SIZE: usize = 323;
+
+#[derive(Clone, Copy, Debug, Default, Deserialize, PartialEq, Serialize)]
+pub struct BlockMetaV2 {
+    pub version: u64,                   // 8
+    pub number: u32,                    // 4
+    pub hash: Hash,                     // 32
+    pub signature: Signature,           // 64
+    pub inner_hash: Hash,               // 32
+    pub previous_hash: Hash,            // 32
+    pub issuer: PublicKey,              // 33
+    pub previous_issuer: PublicKey,     // 33
+    pub time: u64,                      // 8
+    pub pow_min: u32,                   // 4
+    pub members_count: u64,             // 8
+    pub issuers_count: u32,             // 4
+    pub issuers_frame: u64,             // 8
+    pub issuers_frame_var: i64,         // 8
+    pub median_time: u64,               // 8
+    pub nonce: u64,                     // 8
+    pub monetary_mass: u64,             // 8
+    pub unit_base: u32,                 // 4
+    pub dividend: Option<SourceAmount>, // 17 -> TOTAL SIZE == 323 bytes
+}
+impl BlockMetaV2 {
+    pub fn blockstamp(&self) -> Blockstamp {
+        Blockstamp {
+            number: BlockNumber(self.number),
+            hash: BlockHash(self.hash),
+        }
+    }
+}
+
+impl AsBytes for BlockMetaV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let mut buffer = [0u8; BLOCK_META_SERIALIZED_SIZE];
+        bincode::serialize_into(&mut buffer[..], self).unwrap_or_else(|_| unreachable!());
+        f(buffer.as_ref())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for BlockMetaV2 {
+    type Err = bincode::Error;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        bincode::deserialize(bytes)
+    }
+}
+
+impl ToDumpString for BlockMetaV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for BlockMetaV2 {
+    fn from_explorer_str(json_str: &str) -> std::result::Result<Self, FromExplorerValueErr> {
+        serde_json::from_str(&json_str)
+            .map_err(|e| FromExplorerValueErr(format!("{}: '{}'", e, json_str).into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use unwrap::unwrap;
+
+    #[test]
+    fn block_meta_v2_as_bytes() {
+        assert_eq!(
+            unwrap!(bincode::serialized_size(&BlockMetaV2 {
+                dividend: Some(SourceAmount::new(42, 0)),
+                ..Default::default()
+            })),
+            BLOCK_META_SERIALIZED_SIZE as u64
+        );
+        let block_meta = BlockMetaV2::default();
+
+        let bm2_res = block_meta.as_bytes(|bytes| unwrap!(BlockMetaV2::from_bytes(bytes)));
+
+        assert_eq!(bm2_res, block_meta);
+    }
+}
+
+impl From<&DubpBlockV10> for BlockMetaV2 {
+    fn from(block: &DubpBlockV10) -> Self {
+        use dubp::block::prelude::DubpBlockTrait;
+        BlockMetaV2 {
+            version: 10,
+            number: block.number().0,
+            hash: block.hash().0,
+            signature: block.signature(),
+            inner_hash: block.inner_hash(),
+            previous_hash: block.previous_hash(),
+            issuer: block.issuer(),
+            previous_issuer: PublicKey::default(),
+            time: block.local_time(),
+            pow_min: block.pow_min() as u32,
+            members_count: block.members_count() as u64,
+            issuers_count: block.issuers_count() as u32,
+            issuers_frame: block.issuers_frame() as u64,
+            issuers_frame_var: 0,
+            median_time: block.common_time(),
+            nonce: block.nonce(),
+            monetary_mass: block.monetary_mass(),
+            dividend: block.dividend(),
+            unit_base: block.unit_base() as u32,
+        }
+    }
+}
diff --git a/dbs/src/values/block_number_array_db.rs b/dbs/src/values/block_number_array_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e4b5f0c36a7363ea613219cd27e45a35c60f2fd6
--- /dev/null
+++ b/dbs/src/values/block_number_array_db.rs
@@ -0,0 +1,53 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct BlockNumberArrayV1(pub SmallVec<[BlockNumber; 1]>);
+
+impl AsBytes for BlockNumberArrayV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let json_string = serde_json::to_string(self).unwrap_or_else(|_| unreachable!());
+        f(format!("[{}]", json_string).as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for BlockNumberArrayV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let json_str = std::str::from_utf8(bytes).expect("corrupted db: invalid utf8 bytes");
+        //println!("json_str='{}'", &json_str);
+        serde_json::from_str(&json_str)
+            .map_err(|e| CorruptedBytes(format!("{}: '{}'", e, json_str)))
+    }
+}
+
+impl ToDumpString for BlockNumberArrayV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for BlockNumberArrayV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerValueErr(e.0.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
diff --git a/dbs/src/values/cindex_db.rs b/dbs/src/values/cindex_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ded4657e56e00a9d7bee54458e0047a8b4ab03f7
--- /dev/null
+++ b/dbs/src/values/cindex_db.rs
@@ -0,0 +1,98 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct CIndexDbV1 {
+    pub received: SmallVec<[String; 10]>,
+    pub issued: Vec<CIndexLineDbV1>,
+}
+
+impl AsBytes for CIndexDbV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let json_string = serde_json::to_string(self).unwrap_or_else(|_| unreachable!());
+        f(format!("[{}]", json_string).as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for CIndexDbV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let json_str = std::str::from_utf8(bytes).expect("corrupted db: invalid utf8 bytes");
+        //println!("json_str='{}'", &json_str);
+        serde_json::from_str(&json_str)
+            .map_err(|e| CorruptedBytes(format!("{}: '{}'", e, json_str)))
+    }
+}
+
+impl ToDumpString for CIndexDbV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for CIndexDbV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerValueErr(e.0.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CIndexLineDbV1 {
+    pub op: String,
+    #[serde(rename = "writtenOn")]
+    pub written_on: Option<u64>,
+    #[serde(rename = "written_on")]
+    pub written_on_str: String,
+    pub issuer: String,
+    pub receiver: String,
+    #[serde(rename = "created_on")]
+    pub created_on: u64,
+    pub sig: Option<String>,
+    #[serde(rename = "chainable_on")]
+    pub chainable_on: Option<u64>,
+    #[serde(rename = "replayable_on")]
+    pub replayable_on: Option<u64>,
+    #[serde(rename = "expires_on")]
+    pub expires_on: Option<u64>,
+    #[serde(rename = "expired_on")]
+    pub expired_on: u64,
+    pub unchainables: Option<u64>,
+    pub age: Option<u64>,
+    pub stock: Option<u64>,
+    pub from_member: Option<bool>,
+    pub to_member: Option<bool>,
+    pub to_newcomer: Option<bool>,
+    pub to_leaver: Option<bool>,
+    pub is_replay: Option<bool>,
+    pub is_replayable: Option<bool>,
+    #[serde(rename = "sigOK")]
+    pub sig_ok: Option<bool>,
+    #[serde(rename = "created_on_ref")]
+    pub created_on_ref: Option<CreatedOnRef>,
+}
+
+#[derive(Clone, Copy, Debug, Default, Deserialize, PartialEq, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CreatedOnRef {
+    pub median_time: u64,
+}
diff --git a/dbs/src/values/dunp_head.rs b/dbs/src/values/dunp_head.rs
new file mode 100644
index 0000000000000000000000000000000000000000..bf42b21a41f07c1d83f67af91c14dda2ff72ddaa
--- /dev/null
+++ b/dbs/src/values/dunp_head.rs
@@ -0,0 +1,124 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+pub struct DunpHeadDbV1 {
+    pub api: String,
+    pub pubkey: PublicKey,
+    pub blockstamp: Blockstamp,
+    pub software: String,
+    pub software_version: String,
+    pub pow_prefix: u32,
+    pub free_member_room: u32,
+    pub free_mirror_room: u32,
+    pub signature: Signature,
+}
+
+impl DunpHeadDbV1 {
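+    /// Parse a stringified DUNP HEAD v2 message together with its detached base64 signature.
+    ///
+    /// The message must contain at least 11 colon-separated fields; judging from the
+    /// parsing below and the test message, the layout is roughly
+    /// `api:HEAD:version:pubkey:blockstamp:node_uuid_hex:software:software_version:pow_prefix:free_member_room:free_mirror_room`
+    /// (positions 1 and 2 are currently ignored).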
+    pub fn from_stringified(message_v2: &str, signature: &str) -> KvResult<(DunpNodeIdV1Db, Self)> {
+        let signature =
+            Signature::from_base64(signature).map_err(|e| KvError::DeserError(e.into()))?;
+
+        let strs: SmallVec<[&str; 11]> = message_v2.split(':').collect();
+        if strs.len() < 11 {
+            return Err(KvError::DeserError(
+                "DunpHeadDbV1::from_stringified(): invalid message_v2".into(),
+            ));
+        }
+
+        let uuid = u32::from_str_radix(strs[5], 16).map_err(|e| KvError::DeserError(e.into()))?;
+        let pubkey = PublicKey::from_base58(strs[3]).map_err(|e| KvError::DeserError(e.into()))?;
+        let blockstamp =
+            Blockstamp::from_str(strs[4]).map_err(|e| KvError::DeserError(e.into()))?;
+
+        Ok((
+            DunpNodeIdV1Db::new(uuid, pubkey),
+            DunpHeadDbV1 {
+                api: strs[0].to_owned(),
+                pubkey,
+                blockstamp,
+                software: strs[6].to_owned(),
+                software_version: strs[7].to_owned(),
+                pow_prefix: u32::from_str(strs[8]).map_err(|e| KvError::DeserError(e.into()))?,
+                free_member_room: u32::from_str(strs[9])
+                    .map_err(|e| KvError::DeserError(e.into()))?,
+                free_mirror_room: u32::from_str(strs[10])
+                    .map_err(|e| KvError::DeserError(e.into()))?,
+                signature,
+            },
+        ))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_dunp_head_from_stringified() -> KvResult<()> {
+        let message = "WS2POCAIC:HEAD:2:GX1nYVburxeaVP1SCNuhVKwNy6M2h6wPamHhyoSF4Ccn:379783-0000001BB2B88D077605C1330CA60AA222624FAA3BA60566D6CA51A9122376F7:882a5ad1:duniter:1.8.1:1:1:1";
+        let sig = "qBvJ7JZ4i8tKeItmZ/lurzr5O2/jKnB1reoIjEIl5x6sqbAhVsVsHut85yQoP30tURGfVX5FwMhCuC4DvCSFCg==";
+        let (node_id, head) = DunpHeadDbV1::from_stringified(message, sig)?;
+
+        assert_eq!(&format!("{:x}", node_id.get_uuid()), "882a5ad1");
+        assert_eq!(
+            &node_id.get_pubkey().to_string(),
+            "GX1nYVburxeaVP1SCNuhVKwNy6M2h6wPamHhyoSF4Ccn"
+        );
+        assert_eq!(&head.api, "WS2POCAIC");
+        assert_eq!(
+            &head.pubkey.to_string(),
+            "GX1nYVburxeaVP1SCNuhVKwNy6M2h6wPamHhyoSF4Ccn"
+        );
+        assert_eq!(
+            &head.blockstamp.to_string(),
+            "379783-0000001BB2B88D077605C1330CA60AA222624FAA3BA60566D6CA51A9122376F7"
+        );
+        Ok(())
+    }
+}
+
+impl AsBytes for DunpHeadDbV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let bytes = bincode::serialize(self).unwrap_or_else(|_| unreachable!());
+        f(bytes.as_ref())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for DunpHeadDbV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        bincode::deserialize(&bytes).map_err(|e| CorruptedBytes(format!("{}: '{:?}'", e, bytes)))
+    }
+}
+
+impl ToDumpString for DunpHeadDbV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for DunpHeadDbV1 {
+    fn from_explorer_str(_source: &str) -> std::result::Result<Self, FromExplorerValueErr> {
+        unimplemented!()
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
diff --git a/dbs/src/values/idty_db.rs b/dbs/src/values/idty_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..504f79950fb8b96cda2b12d5af30923ac0110c46
--- /dev/null
+++ b/dbs/src/values/idty_db.rs
@@ -0,0 +1,52 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
+pub struct IdtyDbV2 {
+    pub is_member: bool,
+    pub username: String,
+}
+
+impl AsBytes for IdtyDbV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(&bincode::serialize(&self).unwrap_or_else(|_| unreachable!()))
+    }
+}
+
+impl kv_typed::prelude::FromBytes for IdtyDbV2 {
+    type Err = bincode::Error;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        bincode::deserialize(bytes)
+    }
+}
+
+impl ToDumpString for IdtyDbV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for IdtyDbV2 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        serde_json::from_str(source).map_err(|e| FromExplorerValueErr(e.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(&self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
diff --git a/dbs/src/values/iindex_db.rs b/dbs/src/values/iindex_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..947494fbd0400b35297077a1495fcd0b72c0929f
--- /dev/null
+++ b/dbs/src/values/iindex_db.rs
@@ -0,0 +1,81 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct IIndexDbV1(pub SmallVec<[IIndexLineDbV1; 1]>);
+
+impl AsBytes for IIndexDbV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let json_string = serde_json::to_string(self).unwrap_or_else(|_| unreachable!());
+        f(format!("[{}]", json_string).as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for IIndexDbV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let json_str = std::str::from_utf8(bytes).expect("corrupted db: invalid utf8 bytes");
+        //println!("json_str='{}'", &json_str);
+        serde_json::from_str(&json_str)
+            .map_err(|e| CorruptedBytes(format!("{}: '{}'", e, json_str)))
+    }
+}
+
+impl ToDumpString for IIndexDbV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for IIndexDbV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerValueErr(e.0.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct IIndexLineDbV1 {
+    pub op: String,
+    #[serde(rename = "writtenOn")]
+    pub written_on: Option<u64>,
+    #[serde(rename = "written_on")]
+    pub written_on_str: String,
+    pub uid: Option<String>,
+    #[serde(rename = "pub")]
+    pub pubkey: String,
+    pub hash: Option<String>,
+    pub sig: Option<String>,
+    #[serde(rename = "created_on")]
+    pub created_on: Option<String>,
+    pub member: Option<bool>,
+    pub was_member: Option<bool>,
+    pub kick: Option<bool>,
+    #[serde(rename = "wotb_id")]
+    pub wotb_id: Option<usize>,
+    pub age: Option<u64>,
+    pub pub_unique: Option<bool>,
+    pub excluded_is_member: Option<bool>,
+    pub is_being_kicked: Option<bool>,
+    pub uid_unique: Option<bool>,
+    pub has_to_be_excluded: Option<bool>,
+}
diff --git a/dbs/src/values/kick_db.rs b/dbs/src/values/kick_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b616cdc756df211726b08bd413a00508a975c829
--- /dev/null
+++ b/dbs/src/values/kick_db.rs
@@ -0,0 +1,55 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+pub struct KickDbV1 {
+    on: Option<u64>,          // The next time that the identity must be kicked
+    done: SmallVec<[u64; 4]>, // The reversion history
+}
+
+impl AsBytes for KickDbV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let json = serde_json::to_string(self).unwrap_or_else(|_| unreachable!());
+        f(json.as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for KickDbV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let json_str = std::str::from_utf8(bytes).expect("corrupted db: invalid utf8 bytes");
+        serde_json::from_str(&json_str)
+            .map_err(|e| CorruptedBytes(format!("{}: '{}'", e, json_str)))
+    }
+}
+
+impl ToDumpString for KickDbV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for KickDbV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerValueErr(e.0.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
diff --git a/dbs/src/values/mindex_db.rs b/dbs/src/values/mindex_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..eb7d42a35d5471c7dc93e5f73f6c78a929d7378b
--- /dev/null
+++ b/dbs/src/values/mindex_db.rs
@@ -0,0 +1,105 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct MIndexDbV1(pub SmallVec<[MIndexLineDbV1; 1]>);
+
+impl AsBytes for MIndexDbV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let json_string = serde_json::to_string(self).unwrap_or_else(|_| unreachable!());
+        f(format!("[{}]", json_string).as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for MIndexDbV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let json_str = std::str::from_utf8(bytes).expect("corrupted db: invalid utf8 bytes");
+        //println!("json_str='{}'", &json_str);
+        serde_json::from_str(&json_str)
+            .map_err(|e| CorruptedBytes(format!("{}: '{}'", e, json_str)))
+    }
+}
+
+impl ToDumpString for MIndexDbV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for MIndexDbV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerValueErr(e.0.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct MIndexLineDbV1 {
+    pub op: String,
+    #[serde(rename = "writtenOn")]
+    pub written_on: Option<u64>,
+    #[serde(rename = "written_on")]
+    pub written_on_str: String,
+    #[serde(rename = "pub")]
+    pub pubkey: String,
+    pub created_on: Option<String>,
+    #[serde(rename = "type")]
+    pub r#type: Option<String>,
+    #[serde(rename = "expires_on")]
+    pub expires_on: Option<u64>,
+    #[serde(rename = "expired_on")]
+    pub expired_on: Option<u64>,
+    pub revocation: Option<String>,
+    #[serde(rename = "revokes_on")]
+    pub revokes_on: Option<u64>,
+    #[serde(rename = "chainable_on")]
+    pub chainable_on: Option<u64>,
+    #[serde(rename = "revoked_on")]
+    pub revoked_on: Option<String>,
+    pub leaving: Option<bool>,
+    pub age: Option<u64>,
+    pub is_being_revoked: Option<bool>,
+    pub unchainables: Option<u64>,
+    pub number_following: Option<bool>,
+    #[serde(rename = "distanceOK")]
+    pub distance_ok: Option<bool>,
+    pub on_revoked: Option<bool>,
+    pub joins_twice: Option<bool>,
+    pub enough_certs: Option<bool>,
+    pub leaver_is_member: Option<bool>,
+    pub active_is_member: Option<bool>,
+    pub revoked_is_member: Option<bool>,
+    pub already_revoked: Option<bool>,
+    #[serde(rename = "revocationSigOK")]
+    pub revocation_sig_ok: Option<bool>,
+    #[serde(rename = "created_on_ref")]
+    pub created_on_ref: Option<BlockstampTimed>,
+}
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BlockstampTimed {
+    pub median_time: u64,
+    pub number: u32,
+    pub hash: String,
+}
diff --git a/dbs/src/values/peer_card.rs b/dbs/src/values/peer_card.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ee08696d7a9afd211e75c4ba62ef9fed8673c810
--- /dev/null
+++ b/dbs/src/values/peer_card.rs
@@ -0,0 +1,58 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+pub struct PeerCardDbV1 {
+    pub version: u32,
+    pub currency: String,
+    pub pubkey: String,
+    pub blockstamp: String,
+    pub endpoints: Vec<String>,
+    pub status: String,
+    pub signature: String,
+}
+
+impl AsBytes for PeerCardDbV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let bytes = bincode::serialize(self).unwrap_or_else(|_| unreachable!());
+        f(bytes.as_ref())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for PeerCardDbV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        bincode::deserialize(&bytes).map_err(|e| CorruptedBytes(format!("{}: '{:?}'", e, bytes)))
+    }
+}
+
+impl ToDumpString for PeerCardDbV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for PeerCardDbV1 {
+    fn from_explorer_str(_source: &str) -> std::result::Result<Self, FromExplorerValueErr> {
+        unimplemented!()
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
diff --git a/dbs/src/values/pubkey_db.rs b/dbs/src/values/pubkey_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..7c16eedd665c8d11cd430e50927c152a4888667c
--- /dev/null
+++ b/dbs/src/values/pubkey_db.rs
@@ -0,0 +1,149 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+// V1
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub struct PublicKeySingletonDbV1(pub PublicKey);
+
+impl AsBytes for PublicKeySingletonDbV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(format!("[\"{}\"]", self.0).as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for PublicKeySingletonDbV1 {
+    type Err = BaseConversionError;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let mut pubkey_str = std::str::from_utf8(bytes).expect("corrupted db: invalid utf8 bytes");
+
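+        // The value is stored as a JSON array of one string (`["<base58>"]`, see
+        // `as_bytes` above), so strip the leading `["` and trailing `"]` before decoding.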
+        pubkey_str = &pubkey_str[2..pubkey_str.len() - 2];
+        Ok(Self(PublicKey::from_base58(pubkey_str)?))
+    }
+}
+
+impl ToDumpString for PublicKeySingletonDbV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for PublicKeySingletonDbV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Ok(Self(
+            PublicKey::from_base58(source).map_err(|e| FromExplorerValueErr(e.into()))?,
+        ))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        Ok(serde_json::Value::String(self.0.to_base58()))
+    }
+}
+
+#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
+pub struct PublicKeyArrayDbV1(pub SmallVec<[PublicKey; 8]>);
+
+impl AsBytes for PublicKeyArrayDbV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let vec_pub_str = self
+            .0
+            .iter()
+            .map(|pubkey| pubkey.to_base58())
+            .collect::<SmallVec<[String; 8]>>();
+        let json = serde_json::to_string(&vec_pub_str).unwrap_or_else(|_| unreachable!());
+        f(json.as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for PublicKeyArrayDbV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let json_str = std::str::from_utf8(bytes).expect("corrupted db: invalid utf8 bytes");
+        let vec_pub_str: SmallVec<[String; 8]> = serde_json::from_str(&json_str)
+            .map_err(|e| CorruptedBytes(format!("{}: '{}'", e, json_str)))?;
+        Ok(Self(
+            vec_pub_str
+                .into_iter()
+                .map(|pub_str| {
+                    PublicKey::from_base58(&pub_str).map_err(|e| CorruptedBytes(e.to_string()))
+                })
+                .collect::<std::result::Result<SmallVec<[PublicKey; 8]>, Self::Err>>()?,
+        ))
+    }
+}
+
+impl ToDumpString for PublicKeyArrayDbV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for PublicKeyArrayDbV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerValueErr(e.0.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        Ok(serde_json::Value::Array(
+            self.0
+                .iter()
+                .map(|pubkey| serde_json::Value::String(pubkey.to_base58()))
+                .collect(),
+        ))
+    }
+}
+
+// V2
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct PubKeyValV2(pub PublicKey);
+
+impl AsBytes for PubKeyValV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(self.0.as_ref())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for PubKeyValV2 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        Ok(PubKeyValV2(PublicKey::try_from(bytes).map_err(|e| {
+            CorruptedBytes(format!("{}: {:?}", e, bytes))
+        })?))
+    }
+}
+
+impl ToDumpString for PubKeyValV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for PubKeyValV2 {
+    fn from_explorer_str(pubkey_str: &str) -> std::result::Result<Self, FromExplorerValueErr> {
+        Ok(PubKeyValV2(PublicKey::from_base58(&pubkey_str).map_err(
+            |e| FromExplorerValueErr(format!("{}: {}", e, pubkey_str).into()),
+        )?))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        Ok(serde_json::Value::String(self.0.to_base58()))
+    }
+}
diff --git a/dbs/src/values/sindex_db.rs b/dbs/src/values/sindex_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..4d5b9f20477d171e9f96bce5a6fc3a9a4cc16b63
--- /dev/null
+++ b/dbs/src/values/sindex_db.rs
@@ -0,0 +1,123 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct SIndexDBV1 {
+    pub src_type: String,
+    pub tx: Option<String>,
+    pub identifier: String,
+    pub pos: u32,
+    #[serde(rename = "created_on")]
+    pub created_on: Option<String>,
+    #[serde(rename = "written_time")]
+    pub written_time: u64,
+    #[serde(rename = "locktime")]
+    pub lock_time: u64,
+    pub unlock: Option<String>,
+    pub amount: u32,
+    pub base: u32,
+    pub conditions: String,
+    pub consumed: bool,
+    pub tx_obj: TransactionInBlockDbV1,
+    pub age: u64,
+    #[serde(rename = "type")]
+    pub type_: Option<String>,
+    pub available: Option<bool>,
+    pub is_locked: Option<bool>,
+    pub is_time_locked: Option<bool>,
+}
+
+impl AsBytes for SIndexDBV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let json = serde_json::to_string(self).unwrap_or_else(|_| unreachable!());
+        f(json.as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for SIndexDBV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let json_str = std::str::from_utf8(bytes).expect("corrupted db: invalid utf8 bytes");
+        serde_json::from_str(&json_str)
+            .map_err(|e| CorruptedBytes(format!("{}: '{}'", e, json_str)))
+    }
+}
+
+impl ToDumpString for SIndexDBV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for SIndexDBV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerValueErr(e.0.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
+
+#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
+pub struct SourceKeyArrayDbV1(pub SmallVec<[SourceKeyV1; 8]>);
+
+impl AsBytes for SourceKeyArrayDbV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let vec_pub_str = self
+            .0
+            .iter()
+            .map(|source_key| source_key.to_string())
+            .collect::<SmallVec<[String; 8]>>();
+        let json = serde_json::to_string(&vec_pub_str).unwrap_or_else(|_| unreachable!());
+        f(json.as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for SourceKeyArrayDbV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let json_str = std::str::from_utf8(bytes).expect("corrupted db: invalid utf8 bytes");
+        let vec_source_key_str: SmallVec<[String; 8]> = serde_json::from_str(&json_str)
+            .map_err(|e| CorruptedBytes(format!("{}: '{}'", e, json_str)))?;
+        Ok(Self(
+            vec_source_key_str
+                .into_iter()
+                .map(|source_key_str| SourceKeyV1::from_bytes(source_key_str.as_bytes()))
+                .collect::<std::result::Result<SmallVec<[SourceKeyV1; 8]>, Self::Err>>()?,
+        ))
+    }
+}
+
+impl ToDumpString for SourceKeyArrayDbV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for SourceKeyArrayDbV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerValueErr(e.0.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
diff --git a/dbs/src/values/source_amount.rs b/dbs/src/values/source_amount.rs
new file mode 100644
index 0000000000000000000000000000000000000000..331493f7d12aed0c60bb92e7e96b3ce9f3bf2ec4
--- /dev/null
+++ b/dbs/src/values/source_amount.rs
@@ -0,0 +1,67 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
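+/// `SourceAmount` stored through its zerocopy byte representation (the fixed 16-byte
+/// layout: amount + base) and rendered as `"amount:base"` by the explorer feature.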
+#[derive(Clone, Copy, Debug, Default, PartialEq)]
+pub struct SourceAmountValV2(pub SourceAmount);
+
+impl AsBytes for SourceAmountValV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        use zerocopy::AsBytes as _;
+        f(self.0.as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for SourceAmountValV2 {
+    type Err = LayoutVerifiedErr;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let layout = zerocopy::LayoutVerified::<_, SourceAmount>::new(bytes)
+            .ok_or(LayoutVerifiedErr(stringify!(SourceAmount)))?;
+        Ok(Self(*layout))
+    }
+}
+
+impl ToDumpString for SourceAmountValV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for SourceAmountValV2 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        let mut source = source.split(':');
+        let amount_str = source
+            .next()
+            .ok_or_else(|| FromExplorerValueErr("Missing amount".into()))?;
+        let base_str = source
+            .next()
+            .ok_or_else(|| FromExplorerValueErr("Missing base".into()))?;
+        let amount = i64::from_str(amount_str)
+            .map_err(|e| FromExplorerValueErr(format!("Invalid amount: {}", e).into()))?;
+        let base = i64::from_str(base_str)
+            .map_err(|e| FromExplorerValueErr(format!("Invalid base: {}", e).into()))?;
+        Ok(Self(SourceAmount::new(amount, base)))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        Ok(serde_json::Value::String(format!(
+            "{}:{}",
+            self.0.amount(),
+            self.0.base()
+        )))
+    }
+}
diff --git a/dbs/src/values/tx_db.rs b/dbs/src/values/tx_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..bcc06d1499ff269ed83624dd2d2fcaacd2d031c0
--- /dev/null
+++ b/dbs/src/values/tx_db.rs
@@ -0,0 +1,51 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+use dubp::documents::transaction::TransactionDocumentV10;
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+pub struct PendingTxDbV2(pub TransactionDocumentV10);
+
+impl AsBytes for PendingTxDbV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let bytes = bincode::serialize(self).unwrap_or_else(|_| unreachable!());
+        f(bytes.as_ref())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for PendingTxDbV2 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        bincode::deserialize(&bytes).map_err(|e| CorruptedBytes(format!("{}: '{:?}'", e, bytes)))
+    }
+}
+
+impl ToDumpString for PendingTxDbV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for PendingTxDbV2 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerValueErr(e.0.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
diff --git a/dbs/src/values/txs.rs b/dbs/src/values/txs.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f150673ef91757568e6420cf52db7af5f5c58a41
--- /dev/null
+++ b/dbs/src/values/txs.rs
@@ -0,0 +1,51 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+use dubp::documents::transaction::TransactionDocumentV10;
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+pub struct BlockTxsDbV2(pub SmallVec<[TransactionDocumentV10; 8]>);
+
+impl AsBytes for BlockTxsDbV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let bytes = bincode::serialize(self).unwrap_or_else(|_| unreachable!());
+        f(bytes.as_ref())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for BlockTxsDbV2 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        bincode::deserialize(&bytes).map_err(|e| CorruptedBytes(format!("{}: '{:?}'", e, bytes)))
+    }
+}
+
+impl ToDumpString for BlockTxsDbV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for BlockTxsDbV2 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        serde_json::from_str(source).map_err(|e| FromExplorerValueErr(e.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
diff --git a/dbs/src/values/ud_entry_db.rs b/dbs/src/values/ud_entry_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a32efd7749840ca136c10d38b214391252c09050
--- /dev/null
+++ b/dbs/src/values/ud_entry_db.rs
@@ -0,0 +1,79 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct UdEntryDbV1 {
+    #[serde(rename = "pub")]
+    pub pubkey: String,
+    pub member: bool,
+    pub availables: Vec<u32>,
+    pub consumed: Vec<u32>,
+    #[serde(rename = "consumedUDs")]
+    pub consumed_uds: Vec<ConsumedUdDbV1>,
+    pub dividends: Vec<UdAmountDbV1>,
+}
+
+impl AsBytes for UdEntryDbV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let json = serde_json::to_string(self).unwrap_or_else(|_| unreachable!());
+        f(json.as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for UdEntryDbV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let json_str = std::str::from_utf8(bytes).expect("corrupted db: invalid utf8 bytes");
+        serde_json::from_str(&json_str)
+            .map_err(|e| CorruptedBytes(format!("{}: '{}'", e, json_str)))
+    }
+}
+
+impl ToDumpString for UdEntryDbV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for UdEntryDbV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerValueErr(e.0.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct ConsumedUdDbV1 {
+    pub dividend_number: u32,
+    pub tx_hash: String,
+    pub tx_created_on: String,
+    #[serde(rename = "txLocktime")]
+    pub tx_lock_time: u32,
+    pub dividend: UdAmountDbV1,
+}
+
+#[derive(Clone, Copy, Debug, Default, Deserialize, PartialEq, Serialize)]
+pub struct UdAmountDbV1 {
+    pub amount: u32,
+    pub base: u32,
+}
diff --git a/dbs/src/values/utxo.rs b/dbs/src/values/utxo.rs
new file mode 100644
index 0000000000000000000000000000000000000000..dc5b60e56de686fd24aa56d376df211f82a80392
--- /dev/null
+++ b/dbs/src/values/utxo.rs
@@ -0,0 +1,159 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+use std::{collections::HashMap, ops::Deref};
+
+#[derive(
+    Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd, zerocopy::AsBytes, zerocopy::FromBytes,
+)]
+#[repr(transparent)]
+pub struct UtxoValV2([u8; 52]); // 16(SourceAmount) + 32(Hash) + 4(u32)
+impl UtxoValV2 {
+    pub fn new(amount: SourceAmount, tx_hash: Hash, output_index: u32) -> Self {
+        let mut buffer = [0; 52];
+        use zerocopy::AsBytes as _;
+        buffer[..16].copy_from_slice(amount.as_bytes());
+        buffer[16..48].copy_from_slice(tx_hash.as_ref());
+        buffer[48..].copy_from_slice(&output_index.to_le_bytes()[..]);
+        Self(buffer)
+    }
+    pub fn amount(&self) -> &SourceAmount {
+        let layout =
+            zerocopy::LayoutVerified::<_, SourceAmount>::new(&self.0[..16]).expect("dev error");
+
+        unsafe { std::mem::transmute(layout.deref()) }
+    }
+    pub fn tx_hash(&self) -> &Hash {
+        let layout = zerocopy::LayoutVerified::<_, Hash>::new(&self.0[16..48]).expect("dev error");
+
+        unsafe { std::mem::transmute(layout.deref()) }
+    }
+    pub fn output_index(&self) -> u32 {
+        zerocopy::LayoutVerified::<_, zerocopy::U32<byteorder::LittleEndian>>::new(&self.0[48..])
+            .expect("dev error")
+            .get()
+    }
+}
+
+impl Default for UtxoValV2 {
+    fn default() -> Self {
+        UtxoValV2([0u8; 52])
+    }
+}
+
+impl std::fmt::Display for UtxoValV2 {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let amount = self.amount();
+        write!(
+            f,
+            "{}:{}:T:{}:{}",
+            amount.amount(),
+            amount.base(),
+            self.tx_hash(),
+            self.output_index()
+        )
+    }
+}
+
+impl FromStr for UtxoValV2 {
+    type Err = CorruptedBytes;
+
+    fn from_str(_s: &str) -> std::result::Result<Self, Self::Err> {
+        unimplemented!()
+    }
+}
+
+impl AsBytes for UtxoValV2 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(self.0.as_ref())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for UtxoValV2 {
+    type Err = LayoutVerifiedErr;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let layout = zerocopy::LayoutVerified::<_, UtxoValV2>::new(bytes)
+            .ok_or(LayoutVerifiedErr(stringify!(UtxoValV2)))?;
+        Ok(*layout)
+    }
+}
+
+impl ToDumpString for UtxoValV2 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for UtxoValV2 {
+    fn from_explorer_str(_: &str) -> std::result::Result<Self, FromExplorerValueErr> {
+        unimplemented!()
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        Ok(serde_json::Value::String(self.to_string()))
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)]
+pub struct BlockUtxosV2Db(pub HashMap<UtxoIdV10, WalletScriptWithSourceAmountV1Db>);
+
+impl AsBytes for BlockUtxosV2Db {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(&bincode::serialize(&self).unwrap_or_else(|_| unreachable!()))
+    }
+}
+
+impl kv_typed::prelude::FromBytes for BlockUtxosV2Db {
+    type Err = bincode::Error;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        bincode::deserialize(bytes)
+    }
+}
+
+impl ToDumpString for BlockUtxosV2Db {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for BlockUtxosV2Db {
+    fn from_explorer_str(_: &str) -> std::result::Result<Self, FromExplorerValueErr> {
+        unimplemented!()
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn utxo_val_v2() {
+        let amount = SourceAmount::with_base0(42);
+        let tx_hash = Hash::default();
+        let output_index = 3;
+        let utxo_val = UtxoValV2::new(amount, tx_hash, output_index);
+
+        assert_eq!(utxo_val.amount(), &amount);
+        assert_eq!(utxo_val.tx_hash(), &tx_hash);
+        assert_eq!(utxo_val.output_index(), output_index);
+    }
+}
diff --git a/dbs/src/values/wallet_db.rs b/dbs/src/values/wallet_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..465346100ea9beab5c79e3a67f2677c44b69d960
--- /dev/null
+++ b/dbs/src/values/wallet_db.rs
@@ -0,0 +1,55 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct WalletDbV1 {
+    pub conditions: String,
+    pub balance: u64,
+}
+
+impl AsBytes for WalletDbV1 {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        let json = serde_json::to_string(self).unwrap_or_else(|_| unreachable!());
+        f(json.as_bytes())
+    }
+}
+
+impl kv_typed::prelude::FromBytes for WalletDbV1 {
+    type Err = CorruptedBytes;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        let json_str = std::str::from_utf8(bytes).expect("corrupted db: invalid utf8 bytes");
+        serde_json::from_str(&json_str)
+            .map_err(|e| CorruptedBytes(format!("{}: '{}'", e, json_str)))
+    }
+}
+
+impl ToDumpString for WalletDbV1 {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for WalletDbV1 {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Self::from_bytes(source.as_bytes()).map_err(|e| FromExplorerValueErr(e.0.into()))
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
diff --git a/dbs/src/values/wallet_script_with_sa.rs b/dbs/src/values/wallet_script_with_sa.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5d40dd95a54b1c3497008040219cfdfd1c09b6d7
--- /dev/null
+++ b/dbs/src/values/wallet_script_with_sa.rs
@@ -0,0 +1,52 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)]
+pub struct WalletScriptWithSourceAmountV1Db {
+    pub wallet_script: WalletScriptV10,
+    pub source_amount: SourceAmount,
+}
+
+impl AsBytes for WalletScriptWithSourceAmountV1Db {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(&bincode::serialize(&self).unwrap_or_else(|_| unreachable!()))
+    }
+}
+
+impl kv_typed::prelude::FromBytes for WalletScriptWithSourceAmountV1Db {
+    type Err = bincode::Error;
+
+    fn from_bytes(bytes: &[u8]) -> std::result::Result<Self, Self::Err> {
+        bincode::deserialize(bytes)
+    }
+}
+
+impl ToDumpString for WalletScriptWithSourceAmountV1Db {
+    fn to_dump_string(&self) -> String {
+        todo!()
+    }
+}
+
+#[cfg(feature = "explorer")]
+impl ExplorableValue for WalletScriptWithSourceAmountV1Db {
+    fn from_explorer_str(_: &str) -> std::result::Result<Self, FromExplorerValueErr> {
+        unimplemented!()
+    }
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        serde_json::to_value(self).map_err(|e| KvError::DeserError(e.into()))
+    }
+}
diff --git a/dbs/tests/test_explorer.rs b/dbs/tests/test_explorer.rs
new file mode 100644
index 0000000000000000000000000000000000000000..2dabf7ef4255b1c241d85a21dcf9cce799e484fe
--- /dev/null
+++ b/dbs/tests/test_explorer.rs
@@ -0,0 +1,253 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+#[cfg(feature = "explorer")]
+mod explorer {
+    use dubp::common::crypto::keys::ed25519::PublicKey;
+    use dubp::common::crypto::keys::PublicKey as _;
+    //use dubp::common::prelude::*;
+    use duniter_dbs::kv_typed::prelude::*;
+    use duniter_dbs::kv_typed::regex;
+    use duniter_dbs::prelude::*;
+    use duniter_dbs::smallvec::smallvec;
+    use duniter_dbs::{
+        databases::bc_v1::{BcV1Db, BcV1DbWritable},
+        PublicKeySingletonDbV1, UidKeyV1,
+    };
+    use std::{num::NonZeroUsize, str::FromStr};
+    use tempfile::TempDir;
+    use unwrap::unwrap;
+
+    const COLLECTION_NAME: &str = "uids";
+
+    fn stringify_json_value_test(v: serde_json::Value) -> serde_json::Value {
+        v
+    }
+
+    #[test]
+    fn explorer_test_leveldb() -> anyhow::Result<()> {
+        let tmp_dir = unwrap!(TempDir::new());
+
+        let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(tmp_dir.path().to_owned()))?;
+
+        explorer_test(&db)
+    }
+
+    #[test]
+    fn explorer_test_sled() -> anyhow::Result<()> {
+        let db = BcV1Db::<Sled>::open(SledConf::new().temporary(true))?;
+
+        explorer_test(&db)
+    }
+
+    fn explorer_test<B: Backend>(db: &BcV1Db<B>) -> anyhow::Result<()> {
+        // Defines test data
+        let k1 = unwrap!(UidKeyV1::from_str("toto"));
+        let k2 = unwrap!(UidKeyV1::from_str("titi"));
+        let v1 = PublicKeySingletonDbV1(unwrap!(PublicKey::from_base58(
+            "ByE9TU6qhktHYYVAqeTcWcaULBx151siQLyL3TrKvY85"
+        )));
+        let v2 = PublicKeySingletonDbV1(unwrap!(PublicKey::from_base58(
+            "8B5XCAHknsckCkMWeGF9FoGibSNZXF9HtAvzxzg3bSyp"
+        )));
+
+        // Insert test data
+        db.uids_write().upsert(k1, v1)?;
+        db.uids_write().upsert(k2, v2)?;
+
+        // Test action count
+        let res = db.explore(
+            COLLECTION_NAME,
+            ExplorerAction::Count,
+            stringify_json_value_test,
+        )??;
+        assert_eq!(ExplorerActionResponse::Count(2), res);
+
+        // Test action get
+        let res = db.explore(
+            COLLECTION_NAME,
+            ExplorerAction::Get { key: "unexist" },
+            stringify_json_value_test,
+        )??;
+        assert_eq!(ExplorerActionResponse::Get(None), res);
+        let res = db.explore(
+            COLLECTION_NAME,
+            ExplorerAction::Get { key: "toto" },
+            stringify_json_value_test,
+        )??;
+        assert_eq!(
+            ExplorerActionResponse::Get(Some(serde_json::Value::String(
+                "ByE9TU6qhktHYYVAqeTcWcaULBx151siQLyL3TrKvY85".to_owned()
+            ))),
+            res
+        );
+
+        // Test action put
+        let res = db.explore(
+            COLLECTION_NAME,
+            ExplorerAction::Put {
+                key: "titu",
+                value: "Bi6ECSc352gdfEvVzGiQuuDQyaTptHkcxooMGTJk14Tr",
+            },
+            stringify_json_value_test,
+        )??;
+        assert_eq!(ExplorerActionResponse::PutOk, res);
+        let res = db.explore(
+            COLLECTION_NAME,
+            ExplorerAction::Get { key: "titu" },
+            stringify_json_value_test,
+        )??;
+        assert_eq!(
+            ExplorerActionResponse::Get(Some(serde_json::Value::String(
+                "Bi6ECSc352gdfEvVzGiQuuDQyaTptHkcxooMGTJk14Tr".to_owned()
+            ))),
+            res
+        );
+        let res = db.explore(
+            COLLECTION_NAME,
+            ExplorerAction::Count,
+            stringify_json_value_test,
+        )??;
+        assert_eq!(ExplorerActionResponse::Count(3), res);
+
+        // Test action find
+        let range_res = db.explore(
+            COLLECTION_NAME,
+            ExplorerAction::Find {
+                key_min: Some("ti00".to_owned()),
+                key_max: Some("tizz".to_owned()),
+                key_regex: None,
+                value_regex: None,
+                limit: Some(10),
+                reverse: false,
+                step: unsafe { NonZeroUsize::new_unchecked(1) },
+            },
+            stringify_json_value_test,
+        )??;
+        assert_eq!(
+            ExplorerActionResponse::Find(vec![
+                EntryFound {
+                    key: "titi".to_owned(),
+                    value: serde_json::Value::String(
+                        "8B5XCAHknsckCkMWeGF9FoGibSNZXF9HtAvzxzg3bSyp".to_owned()
+                    ),
+                    captures: None,
+                },
+                EntryFound {
+                    key: "titu".to_owned(),
+                    value: serde_json::Value::String(
+                        "Bi6ECSc352gdfEvVzGiQuuDQyaTptHkcxooMGTJk14Tr".to_owned()
+                    ),
+                    captures: None,
+                },
+            ]),
+            range_res
+        );
+
+        // Test action find with limit
+        let range_res = db.explore(
+            COLLECTION_NAME,
+            ExplorerAction::Find {
+                key_min: Some("ti00".to_owned()),
+                key_max: Some("tizz".to_owned()),
+                key_regex: None,
+                value_regex: None,
+                limit: Some(1),
+                reverse: false,
+                step: unsafe { NonZeroUsize::new_unchecked(1) },
+            },
+            stringify_json_value_test,
+        )??;
+        assert_eq!(
+            ExplorerActionResponse::Find(vec![EntryFound {
+                key: "titi".to_owned(),
+                value: serde_json::Value::String(
+                    "8B5XCAHknsckCkMWeGF9FoGibSNZXF9HtAvzxzg3bSyp".to_owned()
+                ),
+                captures: None,
+            }]),
+            range_res
+        );
+
+        // Test action find with limit and reverse
+        let range_res = db.explore(
+            COLLECTION_NAME,
+            ExplorerAction::Find {
+                key_min: Some("ti00".to_owned()),
+                key_max: Some("tizz".to_owned()),
+                key_regex: None,
+                value_regex: None,
+                limit: Some(1),
+                reverse: true,
+                step: unsafe { NonZeroUsize::new_unchecked(1) },
+            },
+            stringify_json_value_test,
+        )??;
+        assert_eq!(
+            ExplorerActionResponse::Find(vec![EntryFound {
+                key: "titu".to_owned(),
+                value: serde_json::Value::String(
+                    "Bi6ECSc352gdfEvVzGiQuuDQyaTptHkcxooMGTJk14Tr".to_owned()
+                ),
+                captures: None,
+            }]),
+            range_res
+        );
+
+        // Test action find with regex capture
+        let range_res = db.explore(
+            COLLECTION_NAME,
+            ExplorerAction::Find {
+                key_min: Some("ti00".to_owned()),
+                key_max: Some("tizz".to_owned()),
+                key_regex: None,
+                value_regex: Some(regex::Regex::new("(E[Cv])[A-Z]").expect("wrong regex")),
+                limit: Some(10),
+                reverse: false,
+                step: unsafe { NonZeroUsize::new_unchecked(1) },
+            },
+            stringify_json_value_test,
+        )??;
+        assert_eq!(
+            ExplorerActionResponse::Find(vec![EntryFound {
+                key: "titu".to_owned(),
+                value: serde_json::Value::String(
+                    "Bi6ECSc352gdfEvVzGiQuuDQyaTptHkcxooMGTJk14Tr".to_owned()
+                ),
+                captures: Some(ValueCaptures(smallvec![
+                    smallvec![Some("EC".to_owned())],
+                    smallvec![Some("Ev".to_owned())]
+                ])),
+            }]),
+            range_res
+        );
+
+        // Test action delete
+        let res = db.explore(
+            COLLECTION_NAME,
+            ExplorerAction::Delete { key: "toto" },
+            stringify_json_value_test,
+        )??;
+        assert_eq!(ExplorerActionResponse::DeleteOk, res);
+        let res = db.explore(
+            COLLECTION_NAME,
+            ExplorerAction::Get { key: "toto" },
+            stringify_json_value_test,
+        )??;
+        assert_eq!(ExplorerActionResponse::Get(None), res);
+
+        Ok(())
+    }
+}
diff --git a/dbs/tests/test_read_write.rs b/dbs/tests/test_read_write.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d626b963a44e11ad4736de63c3f40bf0ce2ae1fb
--- /dev/null
+++ b/dbs/tests/test_read_write.rs
@@ -0,0 +1,405 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use dubp::common::crypto::keys::ed25519::PublicKey;
+use dubp::common::crypto::keys::PublicKey as _;
+use dubp::common::prelude::*;
+use duniter_dbs::kv_typed::prelude::*;
+use duniter_dbs::{
+    databases::bc_v1::{BcV1Db, BcV1DbReadable, BcV1DbWritable, MainBlocksEvent},
+    BlockDbV1, BlockNumberKeyV1, PublicKeySingletonDbV1, UidKeyV1,
+};
+use kv_typed::channel::TryRecvError;
+use std::str::FromStr;
+use tempfile::TempDir;
+use unwrap::unwrap;
+
+#[test]
+fn write_read_delete_b0_leveldb() -> KvResult<()> {
+    let tmp_dir = unwrap!(TempDir::new());
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(tmp_dir.path().to_owned()))?;
+
+    write_read_delete_b0_test(&db)
+}
+
+#[test]
+fn write_read_delete_b0_sled() -> KvResult<()> {
+    let db = BcV1Db::<Sled>::open(SledConf::new().temporary(true))?;
+
+    write_read_delete_b0_test(&db)
+}
+
+#[test]
+fn iter_test_leveldb() -> KvResult<()> {
+    let tmp_dir = unwrap!(TempDir::new());
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(tmp_dir.path().to_owned()))?;
+
+    write_some_entries_and_iter(&db)
+}
+
+#[test]
+fn iter_test_mem() -> KvResult<()> {
+    let db = BcV1Db::<Mem>::open(MemConf::default())?;
+
+    write_some_entries_and_iter(&db)
+}
+
+#[test]
+fn iter_test_sled() -> KvResult<()> {
+    let db = BcV1Db::<Sled>::open(SledConf::new().temporary(true))?;
+
+    write_some_entries_and_iter(&db)
+}
+
+#[test]
+fn batch_test_leveldb() -> KvResult<()> {
+    let tmp_dir = unwrap!(TempDir::new());
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(tmp_dir.path().to_owned()))?;
+
+    batch_test(&db)
+}
+
+#[test]
+fn batch_test_mem() -> KvResult<()> {
+    let db = BcV1Db::<Mem>::open(MemConf::default())?;
+
+    batch_test(&db)
+}
+
+#[test]
+fn batch_test_sled() -> KvResult<()> {
+    let db = BcV1Db::<Sled>::open(SledConf::new().temporary(true))?;
+
+    batch_test(&db)
+}
+
+fn write_read_delete_b0_test<B: Backend>(db: &BcV1Db<B>) -> KvResult<()> {
+    let main_blocks_reader = db.main_blocks();
+
+    let (subscriber, events_recv) = kv_typed::channel::unbounded();
+
+    main_blocks_reader.subscribe(subscriber)?;
+
+    // Empty db
+    assert_eq!(
+        main_blocks_reader.get(&BlockNumberKeyV1(BlockNumber(0)))?,
+        None
+    );
+    assert_eq!(
+        main_blocks_reader.get(&BlockNumberKeyV1(BlockNumber(1)))?,
+        None
+    );
+    assert_eq!(
+        main_blocks_reader.iter(.., |iter| iter.keys().next_res())?,
+        None
+    );
+    assert_eq!(
+        main_blocks_reader.iter(.., |iter| iter.values().next_res())?,
+        None
+    );
+    if let Err(TryRecvError::Empty) = events_recv.try_recv() {
+    } else {
+        panic!("should not receive event");
+    }
+
+    // Insert b0
+    let b0 = BlockDbV1::default();
+    let main_blocks_writer = db.main_blocks_write();
+    main_blocks_writer.upsert(BlockNumberKeyV1(BlockNumber(0)), b0.clone())?;
+    assert_eq!(
+        main_blocks_reader
+            .get(&BlockNumberKeyV1(BlockNumber(0)))?
+            .as_ref(),
+        Some(&b0)
+    );
+    assert_eq!(
+        main_blocks_reader.get(&BlockNumberKeyV1(BlockNumber(1)))?,
+        None
+    );
+    main_blocks_reader.iter(.., |iter| {
+        let mut keys_iter = iter.keys();
+        assert_eq!(
+            keys_iter.next_res()?,
+            Some(BlockNumberKeyV1(BlockNumber(0)))
+        );
+        assert_eq!(keys_iter.next_res()?, None);
+        Ok::<(), KvError>(())
+    })?;
+    main_blocks_reader.iter(.., |iter| {
+        let mut values_iter = iter.values();
+        assert_eq!(values_iter.next_res()?, Some(b0.clone()));
+        assert_eq!(values_iter.next_res()?, None);
+
+        Ok::<(), KvError>(())
+    })?;
+    if let Ok(events) = events_recv.try_recv() {
+        assert_eq!(events.len(), 1);
+        let event = &events[0];
+        assert_eq!(
+            event,
+            &MainBlocksEvent::Upsert {
+                key: BlockNumberKeyV1(BlockNumber(0)),
+                value: b0,
+            },
+        );
+    } else {
+        panic!("should receive event");
+    }
+
+    // Delete b0
+    main_blocks_writer.remove(BlockNumberKeyV1(BlockNumber(0)))?;
+    assert_eq!(
+        main_blocks_reader.get(&BlockNumberKeyV1(BlockNumber(0)))?,
+        None
+    );
+    assert_eq!(
+        main_blocks_reader.get(&BlockNumberKeyV1(BlockNumber(1)))?,
+        None
+    );
+    assert_eq!(
+        main_blocks_reader.iter(.., |it| it.keys().next_res())?,
+        None
+    );
+    assert_eq!(
+        main_blocks_reader.iter(.., |it| it.values().next_res())?,
+        None
+    );
+    if let Ok(events) = events_recv.try_recv() {
+        assert_eq!(events.len(), 1);
+        let event = &events[0];
+        assert_eq!(
+            event,
+            &MainBlocksEvent::Remove {
+                key: BlockNumberKeyV1(BlockNumber(0)),
+            },
+        );
+    } else {
+        panic!("should receive event");
+    }
+
+    Ok(())
+}
+
+fn write_some_entries_and_iter<B: Backend>(db: &BcV1Db<B>) -> KvResult<()> {
+    let k1 = unwrap!(UidKeyV1::from_str("titi"));
+    let p1 = PublicKeySingletonDbV1(unwrap!(PublicKey::from_base58(
+        "42jMJtb8chXrpHMAMcreVdyPJK7LtWjEeRqkPw4eSEVp"
+    )));
+    let k2 = unwrap!(UidKeyV1::from_str("titu"));
+    let p2 = PublicKeySingletonDbV1(unwrap!(PublicKey::from_base58(
+        "D7CYHJXjaH4j7zRdWngUbsURPnSnjsCYtvo6f8dvW3C"
+    )));
+    let k3 = unwrap!(UidKeyV1::from_str("toto"));
+    let p3 = PublicKeySingletonDbV1(unwrap!(PublicKey::from_base58(
+        "8B5XCAHknsckCkMWeGF9FoGibSNZXF9HtAvzxzg3bSyp"
+    )));
+    let uids_writer = db.uids_write();
+    uids_writer.upsert(k1, p1)?;
+    uids_writer.upsert(k2, p2)?;
+    uids_writer.upsert(k3, p3)?;
+
+    let uids_reader = db.uids();
+    {
+        uids_reader.iter(.., |it| {
+            let mut values_iter_step_2 = it.values().step_by(2);
+
+            assert_eq!(Some(p1), values_iter_step_2.next_res()?);
+            assert_eq!(Some(p3), values_iter_step_2.next_res()?);
+            assert_eq!(None, values_iter_step_2.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
+
+        uids_reader.iter(.., |it| {
+            let mut entries_iter_step_2 = it.step_by(2);
+
+            assert_eq!(Some((k1, p1)), entries_iter_step_2.next_res()?);
+            assert_eq!(Some((k3, p3)), entries_iter_step_2.next_res()?);
+            assert_eq!(None, entries_iter_step_2.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
+
+        uids_reader.iter(k2.., |mut entries_iter| {
+            assert_eq!(Some((k2, p2)), entries_iter.next_res()?);
+            assert_eq!(Some((k3, p3)), entries_iter.next_res()?);
+            assert_eq!(None, entries_iter.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
+
+        uids_reader.iter(..=k2, |mut entries_iter| {
+            assert_eq!(Some((k1, p1)), entries_iter.next_res()?);
+            assert_eq!(Some((k2, p2)), entries_iter.next_res()?);
+            assert_eq!(None, entries_iter.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
+
+        uids_reader.iter_rev(k2.., |mut entries_iter_rev| {
+            assert_eq!(Some((k3, p3)), entries_iter_rev.next_res()?);
+            assert_eq!(Some((k2, p2)), entries_iter_rev.next_res()?);
+            assert_eq!(None, entries_iter_rev.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
+
+        uids_reader.iter_rev(..=k2, |mut entries_iter_rev| {
+            assert_eq!(Some((k2, p2)), entries_iter_rev.next_res()?);
+            assert_eq!(Some((k1, p1)), entries_iter_rev.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
+
+        uids_reader.iter_rev(..=k2, |iter_rev| {
+            let mut keys_iter_rev = iter_rev.keys();
+            assert_eq!(Some(k2), keys_iter_rev.next_res()?);
+            assert_eq!(Some(k1), keys_iter_rev.next_res()?);
+            assert_eq!(None, keys_iter_rev.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
+    }
+
+    uids_writer.remove(k3)?;
+
+    uids_reader.iter(.., |it| {
+        let mut keys_iter = it.keys();
+
+        assert_eq!(Some(k1), keys_iter.next_res()?);
+        assert_eq!(Some(k2), keys_iter.next_res()?);
+        assert_eq!(None, keys_iter.next_res()?);
+        Ok::<(), KvError>(())
+    })?;
+
+    Ok(())
+}
+
+fn batch_test<B: Backend>(db: &BcV1Db<B>) -> KvResult<()> {
+    let main_blocks_reader = db.main_blocks();
+
+    let mut batch = db.new_batch();
+
+    let (subscriber, events_recv) = kv_typed::channel::unbounded();
+
+    main_blocks_reader.subscribe(subscriber)?;
+
+    // Empty db
+    assert_eq!(
+        main_blocks_reader.get(&BlockNumberKeyV1(BlockNumber(0)))?,
+        None
+    );
+    assert_eq!(
+        main_blocks_reader.get(&BlockNumberKeyV1(BlockNumber(1)))?,
+        None
+    );
+    assert_eq!(
+        main_blocks_reader.iter(.., |it| it.keys().next_res())?,
+        None
+    );
+    assert_eq!(
+        main_blocks_reader.iter(.., |it| it.values().next_res())?,
+        None
+    );
+    if let Err(TryRecvError::Empty) = events_recv.try_recv() {
+    } else {
+        panic!("should not receive event");
+    }
+
+    // Insert b0 in batch
+    let b0 = BlockDbV1::default();
+    batch
+        .main_blocks()
+        .upsert(BlockNumberKeyV1(BlockNumber(0)), b0.clone());
+
+    // b0 should be written in the batch
+    assert_eq!(
+        batch.main_blocks().get(&BlockNumberKeyV1(BlockNumber(0))),
+        BatchGet::Updated(&b0)
+    );
+
+    // b0 should not be written to the db yet
+    assert_eq!(
+        db.main_blocks().get(&BlockNumberKeyV1(BlockNumber(0)))?,
+        None
+    );
+
+    if let Err(TryRecvError::Empty) = events_recv.try_recv() {
+    } else {
+        panic!("should not receive event");
+    }
+
+    // Insert b1 in batch
+    let b1 = BlockDbV1 {
+        number: 1,
+        ..Default::default()
+    };
+    batch
+        .main_blocks()
+        .upsert(BlockNumberKeyV1(BlockNumber(1)), b1.clone());
+
+    // Write batch in db
+    db.write_batch(batch)?;
+
+    // b0 and b1 should now be written in the db
+    assert_eq!(
+        db.main_blocks()
+            .get(&BlockNumberKeyV1(BlockNumber(0)))?
+            .as_ref(),
+        Some(&b0)
+    );
+    db.main_blocks().iter(.., |it| {
+        let mut keys_iter = it.keys();
+
+        assert_eq!(
+            keys_iter.next_res()?,
+            Some(BlockNumberKeyV1(BlockNumber(0)))
+        );
+        assert_eq!(
+            keys_iter.next_res()?,
+            Some(BlockNumberKeyV1(BlockNumber(1)))
+        );
+        assert_eq!(keys_iter.next_res()?, None);
+        Ok::<(), KvError>(())
+    })?;
+    db.main_blocks().iter(.., |it| {
+        let mut values_iter = it.values();
+
+        assert_eq!(values_iter.next_res()?.as_ref(), Some(&b0));
+        assert_eq!(values_iter.next_res()?.as_ref(), Some(&b1));
+        assert_eq!(values_iter.next_res()?, None);
+        Ok::<(), KvError>(())
+    })?;
+    if let Ok(events) = events_recv.try_recv() {
+        assert_eq!(events.len(), 2);
+        assert!(assert_eq_pairs(
+            [&events[0], &events[1]],
+            [
+                &MainBlocksEvent::Upsert {
+                    key: BlockNumberKeyV1(BlockNumber(0)),
+                    value: b0,
+                },
+                &MainBlocksEvent::Upsert {
+                    key: BlockNumberKeyV1(BlockNumber(1)),
+                    value: b1,
+                }
+            ]
+        ));
+    } else {
+        panic!("should receive event");
+    }
+
+    Ok(())
+}
+
+fn assert_eq_pairs<T: PartialEq>(a: [T; 2], b: [T; 2]) -> bool {
+    (a[0] == b[0] && a[1] == b[1]) || (a[1] == b[0] && a[0] == b[1])
+}
diff --git a/dbs/tests/test_tmp_real.rs b/dbs/tests/test_tmp_real.rs
new file mode 100644
index 0000000000000000000000000000000000000000..563a71d032f52f128803a6a85415135eaf6beea8
--- /dev/null
+++ b/dbs/tests/test_tmp_real.rs
@@ -0,0 +1,767 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+/*use dubp_common::crypto::bases::b58::ToBase58 as _;
+use dubp_common::crypto::hashs::Hash;
+use dubp_common::crypto::keys::PublicKey;
+use dubp_common::prelude::*;
+use duniter_dbs::kv_typed::prelude::*;
+use duniter_dbs::*;
+use duniter_dbs::{
+    BcV1Db, BcV1DbReadable, BcV1DbWritable, BlockDbV1, BlockNumberKeyV1, PublicKeySingletonDbV1,
+    Result, UidKeyV1,
+};
+use once_cell::sync::Lazy;
+use std::{path::PathBuf, str::FromStr, sync::Mutex};
+use unwrap::unwrap;
+
+// Empty mutex used to ensure that only one test runs at a time
+static MUTEX: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));
+
+//const DB_PATH: &str = "/home/elois/.config/duniter/duniter_default/data";
+const DB_PATH: &str = "/home/elois/Documents/ml/leveldb-archives/g1-317499/leveldb";
+
+#[test]
+#[ignore]
+fn db_v1_main_blocks__() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    //let block52 = db.get::<MainBlocksColV1>(&MainBlockKeyV1(52))?;
+    //println!("{:#?}", block52);
+
+    let current_block_number_opt = db
+        .main_blocks()
+        .iter(..)
+        .keys()
+        .reverse()
+        .next()
+        .transpose()?;
+    if let Some(current_block_number) = current_block_number_opt {
+        println!("current_block_number={:#?}", current_block_number);
+        let current_block = db.main_blocks().get(&current_block_number)?;
+        println!("current_block={:#?}", current_block);
+    }
+
+    /*// Collect all main blocks
+    let entries = db
+        .main_blocks()
+        .iter(..)
+        .collect::<KvResult<Vec<(BlockNumberKeyV1, BlockDbV1)>>>()?;
+    println!("entries_len={}", entries.len());*/
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_main_blocks_idty() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    let all = unwrap!(db.mb_idty().get(&PubKeyKeyV1::all())?);
+    assert!(all.0.len() > 2);
+
+    // Collect all main blocks idty
+    let entries = db
+        .mb_idty()
+        .iter(..)
+        .collect::<KvResult<Vec<(PubKeyKeyV1, BlockNumberArrayV1)>>>()?;
+    println!("identities_count={}", entries.len() - 1);
+    for (k, v) in &entries {
+        if v.0.len() == 2 {
+            println!("{:?}", k.0.as_ref());
+        }
+    }
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_main_blocks_certs() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all main blocks certs
+    let entries = db
+        .mb_certs()
+        .iter(..)
+        .collect::<KvResult<Vec<(PubKeyKeyV1, BlockNumberArrayV1)>>>()?;
+    println!("certifications_count={}", entries.len() - 1);
+    for (k, v) in &entries[..10] {
+        if v.0.len() > 1 {
+            println!("{}={:?}", k.0, v.0);
+        }
+    }
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_main_blocks_joiners() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    let all = unwrap!(db.mb_joiners().get(&PubKeyKeyV1::all())?);
+    assert!(all.0.len() > 100);
+
+    // Collect all main blocks joiners
+    let entries = db
+        .mb_joiners()
+        .iter(..)
+        .collect::<KvResult<Vec<(PubKeyKeyV1, BlockNumberArrayV1)>>>()?;
+    println!("joiners_count={}", entries.len() - 1);
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_main_blocks_actives() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    let all = unwrap!(db.mb_actives().get(&PubKeyKeyV1::all())?);
+    assert!(all.0.len() > 100);
+
+    // Collect all main blocks actives
+    let entries = db
+        .mb_actives()
+        .iter(..)
+        .collect::<KvResult<Vec<(PubKeyKeyV1, BlockNumberArrayV1)>>>()?;
+    println!("actives_count={}", entries.len() - 1);
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_main_blocks_leavers() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    let all = unwrap!(db.mb_leavers().get(&PubKeyKeyV1::all())?);
+    assert!(all.0.len() >= 3);
+
+    // Collect all main blocks with leavers
+    let entries = db
+        .mb_leavers()
+        .iter(..)
+        .collect::<KvResult<Vec<(PubKeyKeyV1, BlockNumberArrayV1)>>>()?;
+    println!("leavers_count={}", entries.len() - 1);
+    for (k, v) in entries {
+        println!("{}={:?}", k.0, v.0);
+    }
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_main_blocks_excluded() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    let all = unwrap!(db.mb_excluded().get(&PubKeyKeyV1::all())?);
+    assert!(all.0.len() >= 50);
+
+    // Collect all main blocks with excluded
+    let entries = db
+        .mb_excluded()
+        .iter(..)
+        .collect::<KvResult<Vec<(PubKeyKeyV1, BlockNumberArrayV1)>>>()?;
+    println!("excluded_count={}", entries.len() - 1);
+    /*for (k, v) in entries {
+        println!("{}={:?}", k.0, v.0);
+    }*/
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_main_blocks_revoked() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    let all = unwrap!(db.mb_revoked().get(&PubKeyAndSigV1::all())?);
+    assert!(all.0.len() >= 20);
+
+    // Collect all main blocks with revoked
+    let entries = db
+        .mb_revoked()
+        .iter(..)
+        .collect::<KvResult<Vec<(PubKeyAndSigV1, BlockNumberArrayV1)>>>()?;
+    println!("revoked_count={}", entries.len() - 1);
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_main_blocks_dividend() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    let all = unwrap!(db.mb_dividends().get(&AllKeyV1)?);
+    assert!(all.0.len() >= 900);
+    println!("blocks with dividend={}", all.0.len());
+    println!("last block with dividend={:?}", all.0.last());
+
+    // Collect all main blocks with dividends
+    let entries = db
+        .mb_dividends()
+        .iter(..)
+        .collect::<KvResult<Vec<(AllKeyV1, BlockNumberArrayV1)>>>()?;
+    println!("dividends_keys={}", entries.len());
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_main_blocks_transactions() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    let all = unwrap!(db.mb_transactions().get(&AllKeyV1)?);
+    assert!(all.0.len() >= 900);
+    println!("blocks with tx={}", all.0.len());
+    println!("last block with tx={:?}", all.0.last());
+
+    // Collect all main blocks with transactions
+    let entries = db
+        .mb_transactions()
+        .iter(..)
+        .collect::<KvResult<Vec<(AllKeyV1, BlockNumberArrayV1)>>>()?;
+    println!("transactions_keys={}", entries.len());
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_fork_blocks() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    /*let fork_blocks_keys = db
+        .keys_iter::<ForkBlocksColV1, _>(..)
+        .take(1)
+        .collect::<KvResult<Vec<BlockstampKeyV1>>>()?;
+    let one_fork_block = unwrap!(db.get::<ForkBlocksColV1>(&fork_blocks_keys[0])?);
+
+    println!("{:#?}", one_fork_block);*/
+
+    // Collect all fork blocks
+    let entries = db
+        .fork_blocks()
+        .iter(..)
+        .collect::<KvResult<Vec<(BlockstampKeyV1, BlockDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_bindex() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all bindex entries
+    let entries = db
+        .bindex()
+        .iter(..)
+        .collect::<KvResult<Vec<(BlockNumberKeyV1, BlockHeadDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+    //println!("last_bindex={:?}", entries.last());
+    //for (_k, v) in entries {}
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_iindex__() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    let iindex_keys = db
+        .iindex()
+        .iter(..)
+        .keys()
+        .take(1)
+        .collect::<KvResult<Vec<PubKeyKeyV1>>>()?;
+    let one_iindex_db = unwrap!(db.iindex().get(&iindex_keys[0])?);
+    assert_eq!(one_iindex_db.0[0].pubkey, iindex_keys[0].0.to_base58());
+
+    //println!("{:#?}", one_iindex_db);
+
+    if let Some(ref hash) = one_iindex_db.0[0].hash {
+        let pubkey = unwrap!(db
+            .iindex_hash()
+            .get(&HashKeyV1(unwrap!(Hash::from_hex(hash))))?);
+        assert_eq!(pubkey.0, iindex_keys[0].0);
+    }
+
+    // Count iindex entries
+    let count = db.iindex().count()?;
+    println!("iindex size={}", count);
+
+    // Count members
+    let count_members = db
+        .iindex()
+        .iter(..)
+        .filter_map(KvResult::ok)
+        .filter(|(_k, v)| v.0[0].member.is_some() && unwrap!(v.0[0].member))
+        .count();
+    println!("count_members={}", count_members);
+
+    // Collect all iindex entries
+    let entries = db
+        .iindex()
+        .iter(..)
+        .collect::<KvResult<Vec<(PubKeyKeyV1, IIndexDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_iindex_hash() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    let iindex_entries = db
+        .iindex_hash()
+        .iter(..)
+        .take(3)
+        .collect::<KvResult<Vec<(HashKeyV1, PublicKeySingletonDbV1)>>>()?;
+
+    println!(
+        "(hash, pub)=({:#?},{:#?})",
+        iindex_entries[0].0, iindex_entries[0].1
+    );
+
+    // Collect all iindex/hash entries
+    let entries = db
+        .iindex_hash()
+        .iter(..)
+        .collect::<KvResult<Vec<(HashKeyV1, PublicKeySingletonDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_iindex_kick() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    let entries = db
+        .iindex_kick()
+        .iter(..)
+        .take(3)
+        .collect::<KvResult<Vec<(PubKeyKeyV1, KickDbV1)>>>()?;
+
+    println!("(pub, kick)=({:#?},{:#?})", entries[0].0, entries[0].1);
+
+    // Collect all iindex/kick entries
+    let entries = db
+        .iindex_kick()
+        .iter(..)
+        .collect::<KvResult<Vec<(PubKeyKeyV1, KickDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_iindex_written_on() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all iindex/written_on entries
+    let entries = db
+        .iindex_written_on()
+        .iter(..)
+        .collect::<KvResult<Vec<(BlockNumberKeyV1, PublicKeyArrayDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+    println!("entries={:?}", entries);
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_uid_col() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    let uid_keys = db
+        .uids()
+        .iter(..)
+        .keys()
+        .take(1)
+        .collect::<KvResult<Vec<UidKeyV1>>>()?;
+    let one_pubkey_db = db.uids().get(&uid_keys[0])?;
+
+    println!(
+        "(uid, pubkey) = ({}, {:#?})",
+        uid_keys[0].0.as_str(),
+        one_pubkey_db
+    );
+
+    let start_key = unwrap!(UidKeyV1::from_str("1b"));
+    let end_key = unwrap!(UidKeyV1::from_str("404_not_found"));
+    let uid_index = db
+        .uids()
+        .iter(start_key..end_key)
+        .collect::<KvResult<Vec<(UidKeyV1, PublicKeySingletonDbV1)>>>()?;
+    assert_eq!(
+        uid_index,
+        vec![(
+            unwrap!(UidKeyV1::from_str("1claude1")),
+            PublicKeySingletonDbV1(unwrap!(PublicKey::from_base58(
+                "8B5XCAHknsckCkMWeGF9FoGibSNZXF9HtAvzxzg3bSyp"
+            )))
+        )],
+    );
+
+    // Collect all iindex/uid entries
+    let entries = db
+        .uids()
+        .iter(..)
+        .collect::<KvResult<Vec<(UidKeyV1, PublicKeySingletonDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_mindex__() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    let mindex_keys = db
+        .mindex()
+        .iter(..)
+        .keys()
+        .take(1)
+        .collect::<KvResult<Vec<PubKeyKeyV1>>>()?;
+    let one_mindex_db = unwrap!(db.mindex().get(&mindex_keys[0])?);
+    assert_eq!(one_mindex_db.0[0].pubkey, mindex_keys[0].0.to_base58());
+
+    //println!("{:#?}", one_mindex_db);
+
+    // Count mindex entries
+    let count = db.mindex().count()?;
+    println!("mindex size={}", count);
+
+    // Collect all mindex entries
+    let entries = db
+        .mindex()
+        .iter(..)
+        .collect::<KvResult<Vec<(PubKeyKeyV1, MIndexDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_mindex_expires_on() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all mindex/expires_on entries
+    let entries = db
+        .mindex_expires_on()
+        .iter(..)
+        .collect::<KvResult<Vec<(TimestampKeyV1, PublicKeyArrayDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+    /*for (k, v) in entries {
+        if k.0 == BlockNumber(u32::MAX) {
+            println!("{:?}", v.0)
+        }
+    }*/
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_mindex_revokes_on() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all mindex/revokes_on entries
+    let entries = db
+        .mindex_revokes_on()
+        .iter(..)
+        .collect::<KvResult<Vec<(TimestampKeyV1, PublicKeyArrayDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_mindex_written_on() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all mindex/written_on entries
+    let entries = db
+        .mindex_written_on()
+        .iter(..)
+        .collect::<KvResult<Vec<(BlockNumberKeyV1, PublicKeyArrayDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+    for (k, v) in entries {
+        if k.0 == BlockNumber(u32::MAX) {
+            println!("{:?}", v.0)
+        }
+    }
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_cindex__() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all cindex entries
+    let entries = db
+        .cindex()
+        .iter(..)
+        .collect::<KvResult<Vec<(PubKeyKeyV1, CIndexDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+    //println!("last_bindex={:?}", entries.last());
+    for (_k, v) in entries {
+        for cindex_line in v.issued {
+            if cindex_line.created_on_ref.is_some() {
+                println!("cindex_line={:?}", cindex_line)
+            }
+        }
+    }
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_cindex_expires_on() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all cindex/expires_on entries
+    let entries = db
+        .cindex_expires_on()
+        .iter(..)
+        .collect::<KvResult<Vec<(BlockNumberKeyV1, PublicKeyArrayDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_cindex_written_on() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all cindex/written_on entries
+    let entries = db
+        .cindex_written_on()
+        .iter(..)
+        .collect::<KvResult<Vec<(BlockNumberKeyV1, PublicKeyArrayDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_wallet() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all wallet entries
+    let entries = db
+        .wallet()
+        .iter(..)
+        .collect::<KvResult<Vec<(WalletConditionsV1, WalletDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+    let mut max_cond_len = 0;
+    for (k, _v) in entries {
+        if k.0.len() > max_cond_len {
+            max_cond_len = k.0.len();
+            println!("k={}", k.0.as_str());
+        }
+    }
+    println!("max_cond_len={}", max_cond_len);
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_dividend() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all level_dividend entries
+    let entries = db
+        .uds()
+        .iter(..)
+        .collect::<KvResult<Vec<(PubKeyKeyV1, UdEntryDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+
+    println!("entries[0]=({:?}, {:?})", entries[0].0, entries[0].1);
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_dividend_written_on() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all level_dividend/level_dividend_trim_index entries
+    let entries = db
+        .uds_trim()
+        .iter(..)
+        .collect::<KvResult<Vec<(BlockNumberKeyV1, PublicKeyArrayDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_sindex() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all level_sindex entries
+    let entries = db
+        .sindex()
+        .iter(..)
+        .collect::<KvResult<Vec<(SourceKeyV1, SIndexDBV1)>>>()?;
+    println!("entries_len={}", entries.len());
+
+    println!("entries[0]=({:?}, {:?})", entries[0].0, entries[0].1);
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_sindex_written_on() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all sindex/written_on entries
+    let entries = db
+        .sindex_written_on()
+        .iter(..)
+        .collect::<KvResult<Vec<(BlockNumberKeyV1, SourceKeyArrayDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+    for (k, v) in entries {
+        if k.0 == BlockNumber(u32::MAX) {
+            println!("{:?}", v.0)
+        }
+    }
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_sindex_consumed_on() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+    // Collect all sindex/consumed_on entries
+    let entries = db
+        .sindex_consumed_on()
+        .iter(..)
+        .collect::<KvResult<Vec<(BlockNumberKeyV1, SourceKeyArrayDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+    for (k, v) in entries {
+        println!("{:?} => {:?}", k.0, v.0)
+    }
+
+    Ok(())
+}
+
+#[test]
+#[ignore]
+fn db_v1_sindex_conditions_on() -> Result<()> {
+    let _lock = MUTEX.lock().expect("MUTEX poisoned");
+
+    let db = BcV1Db::<LevelDb>::open(LevelDbConf::path(PathBuf::from(DB_PATH)))?;
+
+    // Collect all sindex/conditions entries
+    let entries = db
+        .sindex_conditions()
+        .iter(..)
+        .collect::<KvResult<Vec<(WalletConditionsV1, SourceKeyArrayDbV1)>>>()?;
+    println!("entries_len={}", entries.len());
+    /*for (k, v) in entries {
+        println!("{:?} => {:?}", k.0, v.0)
+    }*/
+
+    Ok(())
+}*/
diff --git a/deny.toml b/deny.toml
new file mode 100644
index 0000000000000000000000000000000000000000..0ed91ca7b7765821585c8c94db3d0a2576d8a4eb
--- /dev/null
+++ b/deny.toml
@@ -0,0 +1,64 @@
+[advisories]
+ignore = [
+    # Wait to comfy-table upgrade crossterm
+    "RUSTSEC-2020-0091",
+    # generic-array v0.12.3 allowed unsoundly extending lifetimes
+    # but used only on build-dependencies by pest_meta
+    "RUSTSEC-2020-0146",
+]
+
+[bans]
+multiple-versions = "warn"
+deny = [
+    # color-backtrace is nice but brings in too many dependencies that are often outdated, so it is not worth it for us.
+    { name = "color-backtrace" },
+
+    # deprecated
+    { name = "quickersort" },
+
+    # term is not fully maintained, and termcolor is replacing it
+    { name = "term" },
+]
+skip-tree = [
+    { name = "winapi", version = "<= 0.3" },
+]
+
+[licenses]
+unlicensed = "deny"
+# We want really high confidence when inferring licenses from text
+confidence-threshold = 0.92
+allow = [
+    "AGPL-3.0",
+    "Apache-2.0",
+    "BSD-2-Clause",
+    "BSD-3-Clause",
+    "CC0-1.0",
+    "ISC",
+    "MIT",
+    "MPL-2.0",
+    "OpenSSL",
+    "Zlib"
+]
+
+[[licenses.clarify]]
+name = "ring"
+# SPDX considers OpenSSL to encompass both the OpenSSL and SSLeay licenses
+# https://spdx.org/licenses/OpenSSL.html
+# ISC - Both BoringSSL and ring use this for their new files
+# MIT - "Files in third_party/ have their own licenses, as described therein. The MIT
+# license, for third_party/fiat, which, unlike other third_party directories, is
+# compiled into non-test libraries, is included below."
+# OpenSSL - Obviously
+expression = "ISC AND MIT AND OpenSSL"
+license-files = [
+    { path = "LICENSE", hash = 0xbd0eed23 },
+]
+
+[[licenses.exceptions]]
+allow = ["Unlicense"]
+name = "async_io_stream"
+version = "0.3.1"
+
+[sources]
+unknown-registry = "deny"
+unknown-git = "deny"
diff --git a/dubp-wot/Cargo.toml b/dubp-wot/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..5a060429e0738d0acc9d7045c7dd4052086127fd
--- /dev/null
+++ b/dubp-wot/Cargo.toml
@@ -0,0 +1,23 @@
+[package]
+name = "dubp-wot"
+version = "0.11.0"
+authors = ["nanocryk <nanocryk@duniter.org>", "elois <elois@duniter.org>"]
+description = "Makes Web of Trust computations for the Duniter project."
+repository = "https://git.duniter.org/nodes/typescript/duniter"
+readme = "README.md"
+keywords = ["duniter", "wot", "trust"]
+license = "AGPL-3.0"
+edition = "2018"
+
+[lib]
+path = "src/lib.rs"
+
+[dependencies]
+log = "0.4.8"
+rayon = "1.3.0"
+serde = { version = "1.0.105", features = ["derive"] }
+
+[dev-dependencies]
+bincode = "1.2.0"
+
+[features]
diff --git a/dubp-wot/README.md b/dubp-wot/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2d3dcdfc4744684ca0f92b45c2421a52586b78a4
--- /dev/null
+++ b/dubp-wot/README.md
@@ -0,0 +1,10 @@
+# wot
+
+`dubp-wot` is a crate making "Web of Trust" computations for
+the [Duniter] project.
+
+[Duniter]: https://duniter.org/en/
+
+## How to use it
+
+You can add `dubp-wot` as a `cargo` dependency in your Rust project.
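+
+For example (the dependency declaration below is only a sketch; adapt it to how you
+actually consume the crate, e.g. as a path or git dependency):
+
+```toml
+[dependencies]
+dubp-wot = { path = "../dubp-wot" }
+```
+
+```rust
+use dubp_wot::data::rusty::RustyWebOfTrust;
+use dubp_wot::WebOfTrust;
+
+fn main() {
+    // Build a small WoT where each member can issue at most 3 certifications.
+    let mut wot = RustyWebOfTrust::new(3);
+    let alice = wot.add_node();
+    let bob = wot.add_node();
+    wot.add_link(alice, bob);
+    assert_eq!(wot.is_enabled(bob), Some(true));
+}
+```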
diff --git a/dubp-wot/src/data/mod.rs b/dubp-wot/src/data/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..eddc11e19b5083148615cd53848fd212cd894f6d
--- /dev/null
+++ b/dubp-wot/src/data/mod.rs
@@ -0,0 +1,196 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Provide data structures to manage webs of trust.
+//! `LegacyWebOfTrust` is almost a direct translation of the legacy C++ code, while
+//! `RustyWebOfTrust` is a brand new implementation with a more "rusty" style.
+
+pub mod rusty;
+
+use serde::de::{self, Deserialize, DeserializeOwned, Deserializer, Visitor};
+use serde::{Serialize, Serializer};
+use std::{
+    fmt::{self, Debug},
+    io::Write,
+};
+
+/// Wrapper for a node id.
+#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct WotId(pub usize);
+
+impl Serialize for WotId {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        serializer.serialize_u32(self.0 as u32)
+    }
+}
+
+struct WotIdVisitor;
+
+impl<'de> Visitor<'de> for WotIdVisitor {
+    type Value = WotId;
+
+    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+        formatter.write_str("a non-negative integer that fits in a usize")
+    }
+
+    fn visit_u8<E>(self, value: u8) -> Result<WotId, E>
+    where
+        E: de::Error,
+    {
+        Ok(WotId(value as usize))
+    }
+
+    fn visit_u32<E>(self, value: u32) -> Result<WotId, E>
+    where
+        E: de::Error,
+    {
+        Ok(WotId(value as usize))
+    }
+
+    fn visit_u64<E>(self, value: u64) -> Result<WotId, E>
+    where
+        E: de::Error,
+    {
+        if value <= usize::MAX as u64 {
+            Ok(WotId(value as usize))
+        } else {
+            Err(E::custom(format!("u64 out of usize range: {}", value)))
+        }
+    }
+}
+
+impl<'de> Deserialize<'de> for WotId {
+    fn deserialize<D>(deserializer: D) -> Result<WotId, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        deserializer.deserialize_u32(WotIdVisitor)
+    }
+}
+
+/// Results of a certification, with the current certification count
+/// of the destination as parameter.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum NewLinkResult {
+    /// Certification worked.
+    Ok(usize),
+    /// All available certifications have been used.
+    AllCertificationsUsed(usize),
+    /// Unknown source.
+    UnknownSource(),
+    /// Unknown target.
+    UnknownTarget(),
+    /// Self linking is forbidden.
+    SelfLinkingForbidden(),
+}
+
+/// Results of a certification removal, with the current certification count
+/// of the destination as parameter.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum RemLinkResult {
+    /// Certification has been removed.
+    Removed(usize),
+    /// Requested certification doesn't exist.
+    UnknownCert(usize),
+    /// Unknown source.
+    UnknownSource(),
+    /// Unknown target.
+    UnknownTarget(),
+}
+
+/// Results of a certification test.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum HasLinkResult {
+    /// Both nodes are known, here is the result.
+    Link(bool),
+    /// Unknown source.
+    UnknownSource(),
+    /// Unknown target.
+    UnknownTarget(),
+}
+
+/// Trait for a Web Of Trust.
+/// Allows providing other implementations of the `WoT` logic instead of the one
+/// translated from the legacy C++ code.
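+///
+/// A minimal usage sketch (illustrative only), using the `RustyWebOfTrust`
+/// implementation provided by this crate:
+///
+/// ```
+/// use dubp_wot::data::rusty::RustyWebOfTrust;
+/// use dubp_wot::data::HasLinkResult;
+/// use dubp_wot::WebOfTrust;
+///
+/// let mut wot = RustyWebOfTrust::new(3);
+/// let alice = wot.add_node();
+/// let bob = wot.add_node();
+/// wot.add_link(alice, bob);
+/// assert_eq!(wot.has_link(alice, bob), HasLinkResult::Link(true));
+/// assert_eq!(wot.has_link(bob, alice), HasLinkResult::Link(false));
+/// ```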
+pub trait WebOfTrust: Clone + Debug + Default + DeserializeOwned + Send + Serialize + Sync {
+    /// Create a new Web of Trust with the maximum of links a node can issue.
+    fn new(max_links: usize) -> Self;
+
+    /// Clear the Web of Trust data.
+    fn clear(&mut self);
+
+    /// Get the maximum number of links per user.
+    fn get_max_link(&self) -> usize;
+
+    /// Set the maximum number of links per user.
+    fn set_max_link(&mut self, max_link: usize);
+
+    /// Add a new node.
+    fn add_node(&mut self) -> WotId;
+
+    /// Remove the last node.
+    /// Returns `None` if the WoT was empty, otherwise the new top node id.
+    fn rem_node(&mut self) -> Option<WotId>;
+
+    /// Get the size of the WoT.
+    fn size(&self) -> usize;
+
+    /// Check if given node is enabled.
+    /// Returns `None` if this node doesn't exist.
+    fn is_enabled(&self, id: WotId) -> Option<bool>;
+
+    /// Set the enabled state of given node.
+    /// Returns `None` if this node doesn't exist, `Some(enabled)` otherwise.
+    fn set_enabled(&mut self, id: WotId, enabled: bool) -> Option<bool>;
+
+    /// Get enabled node array.
+    fn get_enabled(&self) -> Vec<WotId>;
+
+    /// Get disabled node array.
+    fn get_disabled(&self) -> Vec<WotId>;
+
+    /// Try to add a link from the source to the target.
+    fn add_link(&mut self, source: WotId, target: WotId) -> NewLinkResult;
+
+    /// Try to remove a link from the source to the target.
+    fn rem_link(&mut self, source: WotId, target: WotId) -> RemLinkResult;
+
+    /// Test if there is a link from the source to the target.
+    fn has_link(&self, source: WotId, target: WotId) -> HasLinkResult;
+
+    /// Get the list of links source for this target.
+    /// Returns `None` if this node doesn't exist.
+    fn get_links_source(&self, target: WotId) -> Option<Vec<WotId>>;
+
+    /// Get the number of issued links by a node.
+    /// Returns `None` if this node doesn't exist.
+    fn issued_count(&self, id: WotId) -> Option<usize>;
+
+    /// Test if a node is a sentry.
+    fn is_sentry(&self, node: WotId, sentry_requirement: usize) -> Option<bool>;
+
+    /// Get sentries array.
+    fn get_sentries(&self, sentry_requirement: usize) -> Vec<WotId>;
+
+    /// Get non sentries array.
+    fn get_non_sentries(&self, sentry_requirement: usize) -> Vec<WotId>;
+
+    /// Dump the WoT state to the given writer.
+    fn dump<W: Write>(&self, output: &mut W) -> std::io::Result<()>;
+}
diff --git a/dubp-wot/src/data/rusty.rs b/dubp-wot/src/data/rusty.rs
new file mode 100644
index 0000000000000000000000000000000000000000..398027350a5fc2d25ea1da83ba0c58e252d2e7fc
--- /dev/null
+++ b/dubp-wot/src/data/rusty.rs
@@ -0,0 +1,258 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Experimental implementation of the Web of Trust in a more "rusty" style.
+
+use super::{HasLinkResult, NewLinkResult, RemLinkResult};
+use crate::WebOfTrust;
+use crate::WotId;
+use rayon::prelude::*;
+use serde::{Deserialize, Serialize};
+use std::collections::HashSet;
+
+/// A node in the `WoT` graph.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+struct Node {
+    /// Is this node enabled?
+    enabled: bool,
+    /// Set of links for which this node is the target.
+    links_source: HashSet<WotId>,
+    /// Number of links the node issued.
+    issued_count: usize,
+}
+
+impl Node {
+    /// Create a new node.
+    pub fn new() -> Node {
+        Node {
+            enabled: true,
+            links_source: HashSet::new(),
+            issued_count: 0,
+        }
+    }
+}
+
+/// A more idiomatic implementation of a Web of Trust.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct RustyWebOfTrust {
+    /// List of nodes in the WoT.
+    nodes: Vec<Node>,
+    /// Maximum number of links a node can issue.
+    max_links: usize,
+}
+
+impl Default for RustyWebOfTrust {
+    fn default() -> RustyWebOfTrust {
+        RustyWebOfTrust {
+            nodes: Vec::new(),
+            max_links: 4_000_000_000,
+        }
+    }
+}
+
+impl WebOfTrust for RustyWebOfTrust {
+    fn new(max_links: usize) -> RustyWebOfTrust {
+        RustyWebOfTrust {
+            nodes: vec![],
+            max_links,
+        }
+    }
+
+    fn clear(&mut self) {
+        self.nodes = Vec::new();
+    }
+
+    fn get_max_link(&self) -> usize {
+        self.max_links
+    }
+
+    fn set_max_link(&mut self, max_links: usize) {
+        self.max_links = max_links;
+    }
+
+    fn add_node(&mut self) -> WotId {
+        self.nodes.push(Node::new());
+        WotId(self.nodes.len() - 1)
+    }
+
+    fn rem_node(&mut self) -> Option<WotId> {
+        if !self.nodes.is_empty() {
+            self.nodes.pop();
+            Some(WotId(self.nodes.len()))
+        } else {
+            None
+        }
+    }
+
+    fn size(&self) -> usize {
+        self.nodes.len()
+    }
+
+    fn is_enabled(&self, id: WotId) -> Option<bool> {
+        self.nodes.get(id.0).map(|n| n.enabled)
+    }
+
+    fn set_enabled(&mut self, id: WotId, enabled: bool) -> Option<bool> {
+        self.nodes
+            .get_mut(id.0)
+            .map(|n| n.enabled = enabled)
+            .map(|_| enabled)
+    }
+
+    fn get_enabled(&self) -> Vec<WotId> {
+        self.nodes
+            .par_iter()
+            .enumerate()
+            .filter(|&(_, n)| n.enabled)
+            .map(|(i, _)| WotId(i))
+            .collect()
+    }
+
+    fn get_disabled(&self) -> Vec<WotId> {
+        self.nodes
+            .par_iter()
+            .enumerate()
+            .filter(|&(_, n)| !n.enabled)
+            .map(|(i, _)| WotId(i))
+            .collect()
+    }
+
+    fn add_link(&mut self, source: WotId, target: WotId) -> NewLinkResult {
+        if source == target {
+            NewLinkResult::SelfLinkingForbidden()
+        } else if source.0 >= self.size() {
+            NewLinkResult::UnknownSource()
+        } else if target.0 >= self.size() {
+            NewLinkResult::UnknownTarget()
+        } else if self.nodes[source.0].issued_count >= self.max_links {
+            NewLinkResult::AllCertificationsUsed(self.nodes[target.0].links_source.len())
+        } else {
+            self.nodes[source.0].issued_count += 1;
+            self.nodes[target.0].links_source.insert(source);
+            NewLinkResult::Ok(self.nodes[target.0].links_source.len())
+        }
+    }
+
+    fn rem_link(&mut self, source: WotId, target: WotId) -> RemLinkResult {
+        if source.0 >= self.size() {
+            RemLinkResult::UnknownSource()
+        } else if target.0 >= self.size() {
+            RemLinkResult::UnknownTarget()
+        } else if !self.nodes[target.0].links_source.contains(&source) {
+            RemLinkResult::UnknownCert(self.nodes[target.0].links_source.len())
+        } else {
+            self.nodes[source.0].issued_count -= 1;
+            self.nodes[target.0].links_source.remove(&source);
+            RemLinkResult::Removed(self.nodes[target.0].links_source.len())
+        }
+    }
+
+    fn has_link(&self, source: WotId, target: WotId) -> HasLinkResult {
+        if source.0 >= self.size() {
+            HasLinkResult::UnknownSource()
+        } else if target.0 >= self.size() {
+            HasLinkResult::UnknownTarget()
+        } else {
+            HasLinkResult::Link(self.nodes[target.0].links_source.contains(&source))
+        }
+    }
+
+    fn get_links_source(&self, target: WotId) -> Option<Vec<WotId>> {
+        self.nodes
+            .get(target.0)
+            .map(|n| n.links_source.iter().cloned().collect())
+    }
+
+    fn issued_count(&self, id: WotId) -> Option<usize> {
+        self.nodes.get(id.0).map(|n| n.issued_count)
+    }
+
+    fn is_sentry(&self, node: WotId, sentry_requirement: usize) -> Option<bool> {
+        if node.0 >= self.size() {
+            return None;
+        }
+
+        let node = &self.nodes[node.0];
+
+        Some(
+            node.enabled
+                && node.issued_count >= sentry_requirement
+                && node.links_source.len() >= sentry_requirement,
+        )
+    }
+
+    fn get_sentries(&self, sentry_requirement: usize) -> Vec<WotId> {
+        self.nodes
+            .par_iter()
+            .enumerate()
+            .filter(|&(_, n)| {
+                n.enabled
+                    && n.issued_count >= sentry_requirement
+                    && n.links_source.len() >= sentry_requirement
+            })
+            .map(|(i, _)| WotId(i))
+            .collect()
+    }
+
+    fn get_non_sentries(&self, sentry_requirement: usize) -> Vec<WotId> {
+        self.nodes
+            .par_iter()
+            .enumerate()
+            .filter(|&(_, n)| {
+                n.enabled
+                    && (n.issued_count < sentry_requirement
+                        || n.links_source.len() < sentry_requirement)
+            })
+            .map(|(i, _)| WotId(i))
+            .collect()
+    }
+
+    fn dump<W: std::io::Write>(&self, output: &mut W) -> std::io::Result<()> {
+        writeln!(output, "max_links={}", self.max_links)?;
+        writeln!(output, "nodes_count={}", self.nodes.len())?;
+        for (node_id, node) in self.nodes.iter().enumerate() {
+            write!(output, "{:03}: ", node_id)?;
+            if !node.enabled {
+                write!(output, "disabled ")?;
+            }
+            // dump sources
+            write!(output, "[")?;
+            let mut sorted_sources = node.links_source.iter().copied().collect::<Vec<WotId>>();
+            sorted_sources.sort_unstable();
+            let mut remaining_sources = sorted_sources.len();
+            for source in &sorted_sources {
+                if remaining_sources == 1 {
+                    write!(output, "{}", source.0)?;
+                } else {
+                    write!(output, "{}, ", source.0)?;
+                    remaining_sources -= 1;
+                }
+            }
+            writeln!(output, "]")?;
+        }
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::tests::generic_wot_test;
+
+    #[test]
+    fn wot_tests() {
+        generic_wot_test::<RustyWebOfTrust>();
+    }
+}
diff --git a/dubp-wot/src/lib.rs b/dubp-wot/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..dbfc71b4b46e4801c83cbe4d777815436691678f
--- /dev/null
+++ b/dubp-wot/src/lib.rs
@@ -0,0 +1,561 @@
+//  Copyright (C) 2017-2020  The AXIOM TEAM Association.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! `wot` is a crate making "Web of Trust" computations for
+//! the [Duniter] project.
+//!
+//! [Duniter]: https://duniter.org/
+//!
+//! It defines a trait representing a Web of Trust and allows doing calculations on it.
+//!
+//! It also contains a "legacy" implementation translated from the original C++ code.
+//!
+//! Web of Trust tests are translated from [duniter/wot Javascript test][js-tests].
+//!
+//! [js-tests]: https://github.com/duniter/wot/blob/master/wotcpp/webOfTrust.cpp
+
+#![deny(
+    clippy::unwrap_used,
+    missing_docs,
+    missing_debug_implementations,
+    missing_copy_implementations,
+    trivial_casts,
+    trivial_numeric_casts,
+    unsafe_code,
+    unstable_features,
+    unused_import_braces,
+    unused_qualifications
+)]
+
+pub mod data;
+pub mod operations;
+
+pub use crate::data::{WebOfTrust, WotId};
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::data::*;
+    use crate::operations::centrality::*;
+    use crate::operations::distance::*;
+    use crate::operations::path::*;
+    use std::{io::Read, io::Write, path::Path};
+
+    fn read_bin_file(file_path: &Path) -> Result<Vec<u8>, std::io::Error> {
+        let mut file = std::fs::File::open(file_path)?;
+        if file.metadata()?.len() == 0 {
+            Ok(vec![])
+        } else {
+            let mut bin_datas = Vec::new();
+            file.read_to_end(&mut bin_datas)?;
+
+            Ok(bin_datas)
+        }
+    }
+
+    fn write_bin_file(file_path: &Path, datas: &[u8]) -> Result<(), std::io::Error> {
+        let mut file = std::fs::File::create(file_path)?;
+        file.write_all(datas)?;
+
+        Ok(())
+    }
+
+    /// Test translated from https://github.com/duniter/wot/blob/master/tests/test.js
+    ///
+    /// Clone and file tests are not included in this generic test and should be done in
+    /// the implementation test.
+    #[allow(clippy::cognitive_complexity)]
+    pub fn generic_wot_test<W>()
+    where
+        W: WebOfTrust + Sync,
+    {
+        let centralities_calculator = UlrikBrandesCentralityCalculator {};
+        let distance_calculator = RustyDistanceCalculator {};
+        let path_finder = RustyPathFinder {};
+        let mut wot = W::new(3);
+
+        // should have an initial size of 0
+        assert_eq!(wot.size(), 0);
+
+        // should return `None` if testing `is_enabled()` with an out-of-bounds node
+        assert_eq!(wot.is_enabled(WotId(0)), None);
+        assert_eq!(wot.is_enabled(WotId(23)), None);
+
+        // should give number 0 when we add a node
+        // - add a node
+        assert_eq!(wot.add_node(), WotId(0));
+        assert_eq!(wot.size(), 1);
+        assert_eq!(wot.get_disabled().len(), 0);
+
+        // delete top node (return new top node id)
+        assert_eq!(wot.rem_node(), Some(WotId(0)));
+        assert_eq!(wot.size(), 0);
+
+        // re-add a node
+        assert_eq!(wot.add_node(), WotId(0));
+
+        // - add another
+        assert_eq!(wot.add_node(), WotId(1));
+        assert_eq!(wot.size(), 2);
+        assert_eq!(wot.get_disabled().len(), 0);
+
+        // - add 10 nodes
+        for i in 0..10 {
+            assert_eq!(wot.add_node(), WotId(i + 2));
+        }
+
+        assert_eq!(wot.size(), 12);
+
+        // shouldn't be able to self cert
+        assert_eq!(
+            wot.add_link(WotId(0), WotId(0)),
+            NewLinkResult::SelfLinkingForbidden()
+        );
+
+        // should add certs only within the max_cert limit
+        assert_eq!(wot.add_link(WotId(0), WotId(1)), NewLinkResult::Ok(1));
+        assert_eq!(wot.add_link(WotId(0), WotId(2)), NewLinkResult::Ok(1));
+        assert_eq!(wot.add_link(WotId(0), WotId(3)), NewLinkResult::Ok(1));
+        assert_eq!(
+            wot.add_link(WotId(0), WotId(4)),
+            NewLinkResult::AllCertificationsUsed(0)
+        );
+
+        assert_eq!(wot.get_max_link(), 3);
+        assert_eq!(wot.has_link(WotId(0), WotId(1)), HasLinkResult::Link(true));
+        assert_eq!(wot.has_link(WotId(0), WotId(2)), HasLinkResult::Link(true));
+        assert_eq!(wot.has_link(WotId(0), WotId(3)), HasLinkResult::Link(true));
+        assert_eq!(wot.has_link(WotId(0), WotId(4)), HasLinkResult::Link(false));
+
+        wot.set_max_link(4);
+        assert_eq!(wot.get_max_link(), 4);
+        assert_eq!(wot.has_link(WotId(0), WotId(4)), HasLinkResult::Link(false));
+        wot.add_link(WotId(0), WotId(4));
+        assert_eq!(wot.has_link(WotId(0), WotId(4)), HasLinkResult::Link(true));
+        wot.rem_link(WotId(0), WotId(1));
+        wot.rem_link(WotId(0), WotId(2));
+        wot.rem_link(WotId(0), WotId(3));
+        wot.rem_link(WotId(0), WotId(4));
+
+        // false when not linked + test out of bounds
+        assert_eq!(wot.has_link(WotId(0), WotId(6)), HasLinkResult::Link(false));
+        assert_eq!(
+            wot.has_link(WotId(23), WotId(0)),
+            HasLinkResult::UnknownSource()
+        );
+        assert_eq!(
+            wot.has_link(WotId(2), WotId(53)),
+            HasLinkResult::UnknownTarget()
+        );
+
+        // created nodes should be enabled
+        assert_eq!(wot.is_enabled(WotId(0)), Some(true));
+        assert_eq!(wot.is_enabled(WotId(1)), Some(true));
+        assert_eq!(wot.is_enabled(WotId(2)), Some(true));
+        assert_eq!(wot.is_enabled(WotId(3)), Some(true));
+        assert_eq!(wot.is_enabled(WotId(11)), Some(true));
+
+        // should be able to disable some nodes
+        assert_eq!(wot.set_enabled(WotId(0), false), Some(false));
+        assert_eq!(wot.set_enabled(WotId(1), false), Some(false));
+        assert_eq!(wot.set_enabled(WotId(2), false), Some(false));
+        assert_eq!(wot.get_disabled().len(), 3);
+        assert_eq!(wot.set_enabled(WotId(1), true), Some(true));
+
+        // node 0 and 2 should be disabled
+        assert_eq!(wot.is_enabled(WotId(0)), Some(false));
+        assert_eq!(wot.is_enabled(WotId(1)), Some(true));
+        assert_eq!(wot.is_enabled(WotId(2)), Some(false));
+        assert_eq!(wot.is_enabled(WotId(3)), Some(true));
+        // - set enabled again
+        assert_eq!(wot.set_enabled(WotId(0), true), Some(true));
+        assert_eq!(wot.set_enabled(WotId(1), true), Some(true));
+        assert_eq!(wot.set_enabled(WotId(2), true), Some(true));
+        assert_eq!(wot.set_enabled(WotId(1), true), Some(true));
+        assert_eq!(wot.get_disabled().len(), 0);
+
+        // there should not be a link from 2 to 0
+        assert_eq!(wot.has_link(WotId(2), WotId(0)), HasLinkResult::Link(false));
+
+        // should be able to add some links, cert count is returned
+        assert_eq!(wot.add_link(WotId(2), WotId(0)), NewLinkResult::Ok(1));
+        assert_eq!(wot.add_link(WotId(4), WotId(0)), NewLinkResult::Ok(2));
+        assert_eq!(wot.add_link(WotId(5), WotId(0)), NewLinkResult::Ok(3));
+
+        // the new links should exist
+        /* WoT is:
+         *
+         * 2 --> 0
+         * 4 --> 0
+         * 5 --> 0
+         */
+
+        assert_eq!(wot.has_link(WotId(2), WotId(0)), HasLinkResult::Link(true));
+        assert_eq!(wot.has_link(WotId(4), WotId(0)), HasLinkResult::Link(true));
+        assert_eq!(wot.has_link(WotId(5), WotId(0)), HasLinkResult::Link(true));
+        assert_eq!(wot.has_link(WotId(2), WotId(1)), HasLinkResult::Link(false));
+
+        // should be able to remove some links
+        assert_eq!(wot.rem_link(WotId(4), WotId(0)), RemLinkResult::Removed(2));
+        /*
+         * WoT is now:
+         *
+         * 2 --> 0
+         * 5 --> 0
+         */
+
+        // fewer links should exist
+        assert_eq!(wot.has_link(WotId(2), WotId(0)), HasLinkResult::Link(true));
+        assert_eq!(wot.has_link(WotId(4), WotId(0)), HasLinkResult::Link(false));
+        assert_eq!(wot.has_link(WotId(5), WotId(0)), HasLinkResult::Link(true));
+        assert_eq!(wot.has_link(WotId(2), WotId(1)), HasLinkResult::Link(false));
+
+        // should successfully use distance rule
+        assert_eq!(
+            distance_calculator.is_outdistanced(
+                &wot,
+                WotDistanceParameters {
+                    node: WotId(0),
+                    sentry_requirement: 1,
+                    step_max: 1,
+                    x_percent: 1.0,
+                },
+            ),
+            Ok(false)
+        );
+        // => no because 2,4,5 have certified him
+        assert_eq!(
+            distance_calculator.is_outdistanced(
+                &wot,
+                WotDistanceParameters {
+                    node: WotId(0),
+                    sentry_requirement: 2,
+                    step_max: 1,
+                    x_percent: 1.0,
+                },
+            ),
+            Ok(false)
+        );
+        // => no because only member 2 has 2 certs, and has certified him
+        assert_eq!(
+            distance_calculator.is_outdistanced(
+                &wot,
+                WotDistanceParameters {
+                    node: WotId(0),
+                    sentry_requirement: 3,
+                    step_max: 1,
+                    x_percent: 1.0,
+                },
+            ),
+            Ok(false)
+        );
+        // => no because no member has issued 3 certifications
+
+        // - we add links from member 3
+        assert_eq!(wot.add_link(WotId(3), WotId(1)), NewLinkResult::Ok(1));
+        assert_eq!(wot.add_link(WotId(3), WotId(2)), NewLinkResult::Ok(1));
+        /*
+         * WoT is now:
+         *
+         * 2 --> 0
+         * 5 --> 0
+         * 3 --> 1
+         * 3 --> 2
+         */
+        assert_eq!(wot.size(), 12);
+        assert_eq!(wot.get_sentries(1).len(), 1);
+        assert_eq!(wot.get_sentries(1)[0], WotId(2));
+        assert_eq!(wot.get_sentries(2).len(), 0);
+        assert_eq!(wot.get_sentries(3).len(), 0);
+        assert_eq!(wot.get_non_sentries(1).len(), 11); // 12 - 1
+        assert_eq!(wot.get_non_sentries(2).len(), 12); // 12 - 0
+        assert_eq!(wot.get_non_sentries(3).len(), 12); // 12 - 0
+        assert_eq!(path_finder.find_paths(&wot, WotId(3), WotId(0), 1).len(), 0); // KO
+        assert_eq!(path_finder.find_paths(&wot, WotId(3), WotId(0), 2).len(), 1); // The path 3 -> 2 -> 0 exists
+        assert!(path_finder
+            .find_paths(&wot, WotId(3), WotId(0), 2)
+            .contains(&vec![WotId(3), WotId(2), WotId(0)]));
+
+        assert_eq!(
+            distance_calculator.is_outdistanced(
+                &wot,
+                WotDistanceParameters {
+                    node: WotId(0),
+                    sentry_requirement: 1,
+                    step_max: 1,
+                    x_percent: 1.0,
+                },
+            ),
+            Ok(false)
+        ); // OK : 2 -> 0
+        assert_eq!(
+            distance_calculator.is_outdistanced(
+                &wot,
+                WotDistanceParameters {
+                    node: WotId(0),
+                    sentry_requirement: 2,
+                    step_max: 1,
+                    x_percent: 1.0,
+                },
+            ),
+            Ok(false)
+        ); // OK : 2 -> 0
+        assert_eq!(
+            distance_calculator.is_outdistanced(
+                &wot,
+                WotDistanceParameters {
+                    node: WotId(0),
+                    sentry_requirement: 3,
+                    step_max: 1,
+                    x_percent: 1.0,
+                },
+            ),
+            Ok(false)
+        ); // OK: no sentry with 3 links
+        assert_eq!(
+            distance_calculator.is_outdistanced(
+                &wot,
+                WotDistanceParameters {
+                    node: WotId(0),
+                    sentry_requirement: 2,
+                    step_max: 2,
+                    x_percent: 1.0,
+                },
+            ),
+            Ok(false)
+        ); // OK : 2 -> 0
+
+        wot.add_link(WotId(1), WotId(3));
+        wot.add_link(WotId(2), WotId(3));
+
+        assert_eq!(wot.size(), 12);
+        assert_eq!(wot.get_sentries(1).len(), 3);
+        assert_eq!(wot.get_sentries(1)[0], WotId(1));
+        assert_eq!(wot.get_sentries(1)[1], WotId(2));
+        assert_eq!(wot.get_sentries(1)[2], WotId(3));
+
+        assert_eq!(wot.get_sentries(2).len(), 1);
+        assert_eq!(wot.get_sentries(2)[0], WotId(3));
+        assert_eq!(wot.get_sentries(3).len(), 0);
+        assert_eq!(wot.get_non_sentries(1).len(), 9); // 12 - 3
+        assert_eq!(wot.get_non_sentries(2).len(), 11); // 12 - 1
+        assert_eq!(wot.get_non_sentries(3).len(), 12); // 12 - 0
+        assert_eq!(path_finder.find_paths(&wot, WotId(3), WotId(0), 1).len(), 0); // KO
+        assert_eq!(path_finder.find_paths(&wot, WotId(3), WotId(0), 2).len(), 1); // The path 3 -> 2 -> 0 exists
+        assert!(path_finder
+            .find_paths(&wot, WotId(3), WotId(0), 2)
+            .contains(&vec![WotId(3), WotId(2), WotId(0)]));
+
+        assert_eq!(
+            distance_calculator.is_outdistanced(
+                &wot,
+                WotDistanceParameters {
+                    node: WotId(0),
+                    sentry_requirement: 1,
+                    step_max: 1,
+                    x_percent: 1.0,
+                },
+            ),
+            Ok(true)
+        ); // KO : No path 3 -> 0
+           /*assert_eq!(
+               distance_calculator.is_outdistanced(
+                   &wot,
+                   WotDistanceParameters {
+                       node: WotId(0),
+                       sentry_requirement: 2,
+                       step_max: 1,
+                       x_percent: 1.0,
+                   },
+               ),
+               Some(true)
+           );*/ // KO : No path 3 -> 0
+        assert_eq!(
+            distance_calculator.is_outdistanced(
+                &wot,
+                WotDistanceParameters {
+                    node: WotId(0),
+                    sentry_requirement: 3,
+                    step_max: 1,
+                    x_percent: 1.0,
+                },
+            ),
+            Ok(false)
+        ); // OK: no sentry with 3 links
+        assert_eq!(
+            distance_calculator.is_outdistanced(
+                &wot,
+                WotDistanceParameters {
+                    node: WotId(0),
+                    sentry_requirement: 2,
+                    step_max: 2,
+                    x_percent: 1.0,
+                },
+            ),
+            Ok(false)
+        ); // OK : 3 -> 2 -> 0
+
+        // should have 12 nodes
+        assert_eq!(wot.size(), 12);
+
+        // delete top node (return new top node id)
+        assert_eq!(wot.rem_node(), Some(WotId(11)));
+
+        // should have 11 nodes
+        assert_eq!(wot.size(), 11);
+
+        // should work with member 3 disabled
+        // - with member 3 disabled (non-member)
+        assert_eq!(wot.set_enabled(WotId(3), false), Some(false));
+        assert_eq!(wot.get_disabled().len(), 1);
+        assert_eq!(
+            distance_calculator.is_outdistanced(
+                &wot,
+                WotDistanceParameters {
+                    node: WotId(0),
+                    sentry_requirement: 2,
+                    step_max: 1,
+                    x_percent: 1.0,
+                },
+            ),
+            Ok(false)
+        ); // OK : Disabled
+
+        // Write wot in file
+        write_bin_file(
+            Path::new("test.wot"),
+            &bincode::serialize(&wot).expect("fail to serialize wot"),
+        )
+        .expect("fail to write wot file");
+
+        let wot2_bin = read_bin_file(Path::new("test.wot")).expect("fail to read wot file");
+        let wot2: W = bincode::deserialize(&wot2_bin).expect("fail to deserialize wot");
+
+        // Read wot from file
+        {
+            assert_eq!(wot.size(), wot2.size());
+            assert_eq!(
+                wot.get_non_sentries(1).len(),
+                wot2.get_non_sentries(1).len()
+            );
+            assert_eq!(wot.get_disabled().len(), wot2.get_disabled().len());
+            assert_eq!(wot2.get_disabled().len(), 1);
+            assert_eq!(wot2.is_enabled(WotId(3)), Some(false));
+            assert_eq!(
+                distance_calculator.is_outdistanced(
+                    &wot2,
+                    WotDistanceParameters {
+                        node: WotId(0),
+                        sentry_requirement: 2,
+                        step_max: 1,
+                        x_percent: 1.0,
+                    },
+                ),
+                Ok(false)
+            );
+        }
+
+        // Dump wot
+        let mut dump_wot2_chars = Vec::new();
+        wot2.dump(&mut dump_wot2_chars).expect("fail to dump wot2");
+        let dump_wot2_str = String::from_utf8(dump_wot2_chars).expect("invalid utf8 chars");
+        assert_eq!(
+            dump_wot2_str,
+            "max_links=4
+nodes_count=11
+000: [2, 5]
+001: [3]
+002: [3]
+003: disabled [1, 2]
+004: []
+005: []
+006: []
+007: []
+008: []
+009: []
+010: []
+"
+        );
+
+        // Read g1_genesis wot
+        let wot3_bin = read_bin_file(Path::new("tests/g1_genesis.bin"))
+            .expect("fail to read g1_genesis wot file");
+        let wot3: W = bincode::deserialize(&wot3_bin).expect("fail to deserialize g1_genesis wot");
+
+        // Check g1_genesis wot members_count
+        let members_count = wot3.get_enabled().len() as u64;
+        assert_eq!(members_count, 59);
+
+        // Test compute_distance in g1_genesis wot
+        assert_eq!(
+            distance_calculator.compute_distance(
+                &wot3,
+                WotDistanceParameters {
+                    node: WotId(37),
+                    sentry_requirement: 3,
+                    step_max: 5,
+                    x_percent: 0.8,
+                },
+            ),
+            Ok(WotDistance {
+                sentries: 48,
+                success: 48,
+                success_at_border: 3,
+                reached: 51,
+                reached_at_border: 3,
+                outdistanced: false,
+            },)
+        );
+
+        // Test betweenness centralities computation in g1_genesis wot
+        let centralities = centralities_calculator.betweenness_centralities(&wot3);
+        assert_eq!(centralities.len(), 59);
+        assert_eq!(
+            centralities,
+            vec![
+                148, 30, 184, 11, 60, 51, 40, 115, 24, 140, 47, 69, 16, 34, 94, 126, 151, 0, 34,
+                133, 20, 103, 38, 144, 73, 523, 124, 23, 47, 17, 9, 64, 77, 281, 6, 105, 54, 0,
+                111, 21, 6, 2, 0, 1, 47, 59, 28, 236, 0, 0, 0, 0, 60, 6, 0, 1, 8, 33, 169,
+            ]
+        );
+
+        // Test stress centralities computation in g1_genesis wot
+        let stress_centralities = centralities_calculator.stress_centralities(&wot3);
+        assert_eq!(stress_centralities.len(), 59);
+        assert_eq!(
+            stress_centralities,
+            vec![
+                848, 240, 955, 80, 416, 203, 290, 645, 166, 908, 313, 231, 101, 202, 487, 769, 984,
+                0, 154, 534, 105, 697, 260, 700, 496, 1726, 711, 160, 217, 192, 89, 430, 636, 1276,
+                41, 420, 310, 0, 357, 125, 50, 15, 0, 12, 275, 170, 215, 1199, 0, 0, 0, 0, 201, 31,
+                0, 9, 55, 216, 865,
+            ]
+        );
+
+        // Test distance stress centralities computation in g1_genesis wot
+        let distance_stress_centralities =
+            centralities_calculator.distance_stress_centralities(&wot3, 5);
+        assert_eq!(distance_stress_centralities.len(), 59);
+        assert_eq!(
+            distance_stress_centralities,
+            vec![
+                848, 240, 955, 80, 416, 203, 290, 645, 166, 908, 313, 231, 101, 202, 487, 769, 984,
+                0, 154, 534, 105, 697, 260, 700, 496, 1726, 711, 160, 217, 192, 89, 430, 636, 1276,
+                41, 420, 310, 0, 357, 125, 50, 15, 0, 12, 275, 170, 215, 1199, 0, 0, 0, 0, 201, 31,
+                0, 9, 55, 216, 865,
+            ]
+        );
+    }
+}
diff --git a/dubp-wot/src/operations/centrality.rs b/dubp-wot/src/operations/centrality.rs
new file mode 100644
index 0000000000000000000000000000000000000000..933e43621c24863dbfb6b410d336dff7a32d9c59
--- /dev/null
+++ b/dubp-wot/src/operations/centrality.rs
@@ -0,0 +1,192 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Provide a trait and implementations to compute centralities.
+
+use crate::data::WebOfTrust;
+use crate::data::WotId;
+use std::collections::{HashMap, VecDeque};
+
+/// Compute centralities of the members of a `WebOfTrust`.
+pub trait CentralitiesCalculator<T: WebOfTrust> {
+    /// Compute betweenness centrality of all members.
+    fn betweenness_centralities(&self, wot: &T) -> Vec<u64>;
+    /// Compute stress centrality of all members.
+    fn stress_centralities(&self, wot: &T) -> Vec<u64>;
+    /// Compute distance stress centrality of all members.
+    fn distance_stress_centralities(&self, wot: &T, step_max: usize) -> Vec<u64>;
+}
+
+/// An implementation based on Ulrik Brandes' algorithm.
+#[derive(Debug, Clone, Copy)]
+pub struct UlrikBrandesCentralityCalculator;
+
+impl<T: WebOfTrust> CentralitiesCalculator<T> for UlrikBrandesCentralityCalculator {
+    fn betweenness_centralities(&self, wot: &T) -> Vec<u64> {
+        let wot_size = wot.size();
+        let mut centralities = vec![0.0; wot_size];
+        let enabled_nodes = wot.get_enabled();
+
+        // The source of any path belongs to enabled_nodes
+        for s in enabled_nodes.clone() {
+            let mut stack: Vec<WotId> = Vec::with_capacity(wot_size);
+            let mut paths: HashMap<WotId, Vec<WotId>> = HashMap::with_capacity(wot_size);
+            let mut sigma = vec![0.0; wot_size];
+            let mut d: Vec<isize> = vec![-1; wot_size];
+            let mut q: VecDeque<WotId> = VecDeque::with_capacity(wot_size);
+
+            sigma[s.0] = 1.0;
+            d[s.0] = 0;
+            q.push_back(s);
+            while let Some(v) = q.pop_front() {
+                stack.push(v);
+                for w in wot.get_links_source(v).expect("v doesn't have any source!") {
+                    // w found for the first time ?
+                    if d[w.0] < 0 {
+                        q.push_back(w);
+                        d[w.0] = d[v.0] + 1;
+                    }
+                    // Shortest path to w via v
+                    if d[w.0] == d[v.0] + 1 {
+                        sigma[w.0] += sigma[v.0];
+                        paths.entry(w).or_insert_with(Vec::new).push(v);
+                    }
+                }
+            }
+            let mut delta = vec![0.0; wot_size];
+            // stack returns vertices in order of non-increasing distance from s
+            while let Some(w) = stack.pop() {
+                if paths.contains_key(&w) {
+                    for v in paths.get(&w).expect("w not found in paths!") {
+                        if enabled_nodes.contains(&w) {
+                            delta[v.0] += (sigma[v.0] / sigma[w.0]) * (1.0 + delta[w.0]);
+                        } else {
+                            // If w not in enabled_nodes, no path can end at w
+                            delta[v.0] += (sigma[v.0] / sigma[w.0]) * delta[w.0];
+                        }
+                    }
+                }
+                if w != s {
+                    centralities[w.0] += delta[w.0];
+                }
+            }
+        }
+        centralities.into_iter().map(|c| c as u64).collect()
+    }
+    fn stress_centralities(&self, wot: &T) -> Vec<u64> {
+        let wot_size = wot.size();
+        let mut centralities = vec![0.0; wot_size];
+        let enabled_nodes = wot.get_enabled();
+
+        // The source of any path belongs to enabled_nodes
+        for s in enabled_nodes.clone() {
+            let mut stack: Vec<WotId> = Vec::with_capacity(wot_size);
+            let mut paths: HashMap<WotId, Vec<WotId>> = HashMap::with_capacity(wot_size);
+            let mut sigma = vec![0.0; wot_size];
+            let mut d: Vec<isize> = vec![-1; wot_size];
+            let mut q: VecDeque<WotId> = VecDeque::with_capacity(wot_size);
+
+            sigma[s.0] = 1.0;
+            d[s.0] = 0;
+            q.push_back(s);
+            while let Some(v) = q.pop_front() {
+                stack.push(v);
+                for w in wot.get_links_source(v).expect("v doesn't have any source!") {
+                    // w found for the first time ?
+                    if d[w.0] < 0 {
+                        q.push_back(w);
+                        d[w.0] = d[v.0] + 1;
+                    }
+                    // Shortest path to w via v
+                    if d[w.0] == d[v.0] + 1 {
+                        sigma[w.0] += sigma[v.0];
+                        paths.entry(w).or_insert_with(Vec::new).push(v);
+                    }
+                }
+            }
+            let mut delta = vec![0.0; wot_size];
+            // stack returns vertices in order of non-increasing distance from s
+            while let Some(w) = stack.pop() {
+                if paths.contains_key(&w) {
+                    for v in paths.get(&w).expect("w not found in paths!") {
+                        if enabled_nodes.contains(&w) {
+                            delta[v.0] += sigma[v.0] * (1.0 + (delta[w.0] / sigma[w.0]));
+                        } else {
+                            // If w not in enabled_nodes, no path can end at w
+                            delta[v.0] += sigma[v.0] * (delta[w.0] / sigma[w.0]);
+                        }
+                    }
+                }
+                if w != s {
+                    centralities[w.0] += delta[w.0];
+                }
+            }
+        }
+        centralities.into_iter().map(|c| c as u64).collect()
+    }
+    fn distance_stress_centralities(&self, wot: &T, step_max: usize) -> Vec<u64> {
+        let wot_size = wot.size();
+        let mut centralities = vec![0.0; wot_size];
+        let enabled_nodes = wot.get_enabled();
+
+        // The source of any path belongs to enabled_nodes
+        for s in enabled_nodes.clone() {
+            let mut stack: Vec<WotId> = Vec::with_capacity(wot_size);
+            let mut paths: HashMap<WotId, Vec<WotId>> = HashMap::with_capacity(wot_size);
+            let mut sigma = vec![0.0; wot_size];
+            let mut d: Vec<isize> = vec![-1; wot_size];
+            let mut q: VecDeque<WotId> = VecDeque::with_capacity(wot_size);
+
+            sigma[s.0] = 1.0;
+            d[s.0] = 0;
+            q.push_back(s);
+            while let Some(v) = q.pop_front() {
+                stack.push(v);
+                if d[v.0] < step_max as isize {
+                    for w in wot.get_links_source(v).expect("v doesn't have any source!") {
+                        // w found for the first time ?
+                        if d[w.0] < 0 {
+                            q.push_back(w);
+                            d[w.0] = d[v.0] + 1;
+                        }
+                        // Shortest path to w via v
+                        if d[w.0] == d[v.0] + 1 {
+                            sigma[w.0] += sigma[v.0];
+                            paths.entry(w).or_insert_with(Vec::new).push(v);
+                        }
+                    }
+                }
+            }
+            let mut delta = vec![0.0; wot_size];
+            // stack returns vertices in order of non-increasing distance from s
+            while let Some(w) = stack.pop() {
+                if paths.contains_key(&w) {
+                    for v in paths.get(&w).expect("w not found in paths!") {
+                        if enabled_nodes.contains(&w) {
+                            delta[v.0] += sigma[v.0] * (1.0 + (delta[w.0] / sigma[w.0]));
+                        } else {
+                            // If w not in enabled_nodes, no path can end at w
+                            delta[v.0] += sigma[v.0] * (delta[w.0] / sigma[w.0]);
+                        }
+                    }
+                }
+                if w != s {
+                    centralities[w.0] += delta[w.0];
+                }
+            }
+        }
+        centralities.into_iter().map(|c| c as u64).collect()
+    }
+}
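+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::data::rusty::RustyWebOfTrust;
+    use crate::data::{WebOfTrust, WotId};
+
+    // Illustrative sketch using `RustyWebOfTrust`: in the chain 0 -> 1 -> 2, only the
+    // middle member lies on a shortest path between two other members, so its
+    // betweenness and stress centralities are 1 while the others are 0.
+    #[test]
+    fn centralities_of_a_chain() {
+        let calculator = UlrikBrandesCentralityCalculator {};
+        let mut wot = RustyWebOfTrust::new(3);
+        for _ in 0..3 {
+            wot.add_node();
+        }
+        wot.add_link(WotId(0), WotId(1));
+        wot.add_link(WotId(1), WotId(2));
+        assert_eq!(calculator.betweenness_centralities(&wot), vec![0, 1, 0]);
+        assert_eq!(calculator.stress_centralities(&wot), vec![0, 1, 0]);
+    }
+}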
diff --git a/dubp-wot/src/operations/density.rs b/dubp-wot/src/operations/density.rs
new file mode 100644
index 0000000000000000000000000000000000000000..76ae9a0f2ca00e540c9ad55c64d2b1dafaf2e8cf
--- /dev/null
+++ b/dubp-wot/src/operations/density.rs
@@ -0,0 +1,31 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Provide a function to compute the average density.
+
+use crate::data::WebOfTrust;
+
+/// Compute average density
+pub fn calculate_average_density<T: WebOfTrust>(wot: &T) -> usize {
+    let enabled_members = wot.get_enabled();
+    let enabled_members_count = enabled_members.len();
+    let mut count_actives_links: usize = 0;
+    for member in &enabled_members {
+        count_actives_links += wot
+            .issued_count(*member)
+            .unwrap_or_else(|| panic!("Fail to get issued_count of wot_id {}", (*member).0));
+    }
+    ((count_actives_links as f32 / enabled_members_count as f32) * 1_000.0) as usize
+}
diff --git a/dubp-wot/src/operations/distance.rs b/dubp-wot/src/operations/distance.rs
new file mode 100644
index 0000000000000000000000000000000000000000..da6362e77688f14d159c8791f830890b870da590
--- /dev/null
+++ b/dubp-wot/src/operations/distance.rs
@@ -0,0 +1,211 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Provide a trait and implementations to compute distances.
+
+use crate::data::WebOfTrust;
+use crate::data::WotId;
+use rayon::prelude::*;
+use std::collections::HashSet;
+
+/// Parameters for `WoT` distance calculations.
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub struct WotDistanceParameters {
+    /// Node from where distances are calculated.
+    pub node: WotId,
+    /// Links count received AND issued to be a sentry.
+    pub sentry_requirement: u32,
+    /// Currency parameter.
+    pub step_max: u32,
+    /// Currency parameter.
+    pub x_percent: f64,
+}
+
+/// Results of `WebOfTrust::compute_distance`.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct WotDistance {
+    /// Sentries count
+    pub sentries: u32,
+    /// Success count
+    pub success: u32,
+    /// Success at border count
+    pub success_at_border: u32,
+    /// Reached count
+    pub reached: u32,
+    /// Reached at border count
+    pub reached_at_border: u32,
+    /// Is the node outdistanced?
+    pub outdistanced: bool,
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+/// Error that occurred while computing a distance
+pub enum DistanceError {
+    /// Node doesn't exist
+    NodeDontExist(WotId),
+}
+
+/// Compute distance between nodes of a `WebOfTrust`.
+pub trait DistanceCalculator<T: WebOfTrust> {
+    /// Compute distance between a node and the network.
+    /// Returns an error if this node doesn't exist.
+    fn compute_distance(
+        &self,
+        wot: &T,
+        params: WotDistanceParameters,
+    ) -> Result<WotDistance, DistanceError>;
+
+    /// Compute distances of all members
+    fn compute_distances(
+        &self,
+        wot: &T,
+        sentry_requirement: u32,
+        step_max: u32,
+        x_percent: f64,
+    ) -> Result<(usize, Vec<usize>, usize, Vec<usize>), DistanceError>;
+
+    /// Test if a node is outdistanced in the network.
+    /// Returns an error if this node doesn't exist.
+    fn is_outdistanced(
+        &self,
+        wot: &T,
+        params: WotDistanceParameters,
+    ) -> Result<bool, DistanceError>;
+}
+
+/// Calculate distances between 2 members in a `WebOfTrust`.
+#[derive(Debug, Clone, Copy)]
+pub struct RustyDistanceCalculator;
+
+impl<T: WebOfTrust + Sync> DistanceCalculator<T> for RustyDistanceCalculator {
+    fn compute_distance(
+        &self,
+        wot: &T,
+        params: WotDistanceParameters,
+    ) -> Result<WotDistance, DistanceError> {
+        let WotDistanceParameters {
+            node,
+            sentry_requirement,
+            step_max,
+            x_percent,
+        } = params;
+
+        if node.0 >= wot.size() {
+            return Err(DistanceError::NodeDontExist(node));
+        }
+
+        let mut area = HashSet::new();
+        area.insert(node);
+        let mut border = HashSet::new();
+        border.insert(node);
+
+        for _ in 0..step_max {
+            border = border
+                .par_iter()
+                .map(|&id| {
+                    if let Some(links_source) = wot.get_links_source(id) {
+                        Ok(links_source
+                            .iter()
+                            .filter(|source| !area.contains(source))
+                            .cloned()
+                            .collect::<HashSet<_>>())
+                    } else {
+                        Err(DistanceError::NodeDontExist(id))
+                    }
+                })
+                .try_reduce(HashSet::new, |mut acc, sources| {
+                    for source in sources {
+                        acc.insert(source);
+                    }
+                    Ok(acc)
+                })?;
+            area.extend(border.iter());
+        }
+
+        let sentries: Vec<_> = wot.get_sentries(sentry_requirement as usize);
+        let mut success = area.iter().filter(|n| sentries.contains(n)).count() as u32;
+        let success_at_border = border.iter().filter(|n| sentries.contains(n)).count() as u32;
+        let mut sentries = sentries.len() as u32;
+        if wot
+            .is_sentry(node, sentry_requirement as usize)
+            .ok_or(DistanceError::NodeDontExist(node))?
+        {
+            sentries -= 1;
+            success -= 1;
+        }
+
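+        // The node is outdistanced when fewer than `trunc(x_percent * sentries) - 1`
+        // sentries reach it within `step_max` steps.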
+        Ok(WotDistance {
+            sentries,
+            reached: area.len() as u32 - 1,
+            reached_at_border: border.len() as u32,
+            success,
+            success_at_border,
+            outdistanced: f64::from(success) < ((x_percent * f64::from(sentries)).trunc() - 1.0),
+        })
+    }
+
+    fn is_outdistanced(
+        &self,
+        wot: &T,
+        params: WotDistanceParameters,
+    ) -> Result<bool, DistanceError> {
+        self.compute_distance(wot, params).map(|result| result.outdistanced)
+    }
+
+    fn compute_distances(
+        &self,
+        wot: &T,
+        sentry_requirement: u32,
+        step_max: u32,
+        x_percent: f64,
+    ) -> Result<(usize, Vec<usize>, usize, Vec<usize>), DistanceError> {
+        let members_count = wot.get_enabled().len();
+        let mut distances = Vec::new();
+        let mut average_distance: usize = 0;
+        let mut connectivities = Vec::new();
+        let mut average_connectivity: usize = 0;
+        for i in 0..wot.size() {
+            let distance_datas: WotDistance = self.compute_distance(
+                wot,
+                WotDistanceParameters {
+                    node: WotId(i),
+                    sentry_requirement,
+                    step_max,
+                    x_percent,
+                },
+            )?;
+            let distance = ((f64::from(distance_datas.success)
+                / (x_percent * f64::from(distance_datas.sentries)))
+                * 100.0) as usize;
+            distances.push(distance);
+            average_distance += distance;
+            let connectivity =
+                ((f64::from(distance_datas.success - distance_datas.success_at_border)
+                    / (x_percent * f64::from(distance_datas.sentries)))
+                    * 100.0) as usize;
+            connectivities.push(connectivity);
+            average_connectivity += connectivity;
+        }
+        average_distance /= members_count;
+        average_connectivity /= members_count;
+        Ok((
+            average_distance,
+            distances,
+            average_connectivity,
+            connectivities,
+        ))
+    }
+}
diff --git a/dubp-wot/src/operations/mod.rs b/dubp-wot/src/operations/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e3fb53ca1cbe492ef0f6b2aba76f6a3d2f8ad133
--- /dev/null
+++ b/dubp-wot/src/operations/mod.rs
@@ -0,0 +1,21 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Provide operation traits and implementations on `WebOfTrust` objects.
+
+pub mod centrality;
+pub mod density;
+pub mod distance;
+pub mod path;
diff --git a/dubp-wot/src/operations/path.rs b/dubp-wot/src/operations/path.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5aef5149c1ebe34d2bf9cbebe63a752522c08858
--- /dev/null
+++ b/dubp-wot/src/operations/path.rs
@@ -0,0 +1,107 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Provide a trait and implementations to find paths between nodes.
+
+use crate::data::WebOfTrust;
+use crate::data::WotId;
+use std::collections::HashSet;
+
+/// Find paths between 2 nodes of a `WebOfTrust`.
+pub trait PathFinder<T: WebOfTrust> {
+    /// Get paths from one node to the other.
+    fn find_paths(&self, wot: &T, from: WotId, to: WotId, k_max: u32) -> Vec<Vec<WotId>>;
+}
+
+/// A new "rusty-er" implementation of `WoT` path finding.
+#[derive(Debug, Clone, Copy)]
+pub struct RustyPathFinder;
+
+impl<T: WebOfTrust> PathFinder<T> for RustyPathFinder {
+    fn find_paths(&self, wot: &T, from: WotId, to: WotId, k_max: u32) -> Vec<Vec<WotId>> {
+        if from.0 >= wot.size() || to.0 >= wot.size() {
+            return vec![];
+        }
+
+        // 1. We explore the k_max area around `to`, and only remember backward
+        //    links of the smallest distance.
+
+        // Stores for each node its distance to `to` node and its backward links.
+        // By default all nodes are out of range (`k_max + 1`) and have no known backward links yet.
+        let mut graph: Vec<(u32, Vec<WotId>)> =
+            (0..wot.size()).map(|_| (k_max + 1, vec![])).collect();
+        // The `to` node is at distance 0 and has no backward links.
+        graph[to.0] = (0, vec![]);
+        // Explored zone border.
+        let mut border = HashSet::new();
+        border.insert(to);
+
+        for distance in 1..=k_max {
+            let mut next_border = HashSet::new();
+
+            for node in border {
+                for source in &wot
+                    .get_links_source(node)
+                    .expect("links source must not be None")
+                {
+                    match graph[source.0].0 {
+                        path_distance if path_distance > distance => {
+                            // shorter path, we replace
+                            graph[source.0] = (distance, vec![node]);
+                            next_border.insert(*source);
+                        }
+                        path_distance if path_distance == distance => {
+                            // same length, we combine
+                            graph[source.0].1.push(node);
+                            next_border.insert(*source);
+                        }
+                        _ => (), // a shorter path to this node is already known, ignore this longer one
+                    }
+                }
+            }
+
+            border = next_border;
+        }
+
+        // 2. If `from` is found, we follow the backward links and build paths.
+        //    For each path, we look at the last element sources and build new paths with them.
+        let mut paths = vec![vec![from]];
+
+        for _ in 1..=k_max {
+            let mut new_paths = vec![];
+
+            for path in &paths {
+                let node = path.last().expect("path should not be empty");
+
+                if node == &to {
+                    // If path is complete, we keep it.
+                    new_paths.push(path.clone())
+                } else {
+                    // If not complete, we extend the path with its backward links.
+                    let sources = &graph[node.0];
+                    for source in &sources.1 {
+                        let mut new_path = path.clone();
+                        new_path.push(*source);
+                        new_paths.push(new_path);
+                    }
+                }
+            }
+
+            paths = new_paths;
+        }
+
+        paths
+    }
+}
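
A short usage sketch of the path finder, relying only on the items shown above; the `dubp_wot` crate path is assumed from this repository's layout, and any `WebOfTrust` implementation from the crate's `data` module can be plugged in:

```rust
use dubp_wot::data::{WebOfTrust, WotId};
use dubp_wot::operations::path::{PathFinder, RustyPathFinder};

/// Length (in certification steps) of a shortest path from `from` to `to`,
/// if one exists within `k_max` steps.
fn shortest_path_len<W: WebOfTrust>(wot: &W, from: WotId, to: WotId, k_max: u32) -> Option<usize> {
    RustyPathFinder
        .find_paths(wot, from, to, k_max)
        .iter()
        // Every returned path starts at `from` and ends at `to`, so the number
        // of steps is the number of nodes minus one.
        .map(|path| path.len() - 1)
        .min()
}
```

Because phase 1 only records backward links of minimal distance, all returned paths share the same (shortest) length; `min()` simply collapses the list.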
diff --git a/dubp-wot/tests/g1_genesis.bin b/dubp-wot/tests/g1_genesis.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d684f6197a5442ff4860dec581a65b19456a51d4
Binary files /dev/null and b/dubp-wot/tests/g1_genesis.bin differ
diff --git a/global/Cargo.toml b/global/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..aff8d1f8ca1ce8a90ddd25d82ef7c122c549b649
--- /dev/null
+++ b/global/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "duniter-global"
+version = "1.8.1"
+authors = ["librelois <elois@duniter.org>"]
+license = "AGPL-3.0"
+edition = "2018"
+
+[dependencies]
+async-rwlock = "1.3.0"
+dubp = { version = "0.51.0", features = ["duniter"] }
+duniter-dbs = { path = "../dbs" }
+flume = "0.10"
+mockall = { version = "0.9", optional = true }
+once_cell = "1.5"
+tokio = { version = "1.2", features = ["io-util", "rt-multi-thread"] }
+
+[features]
+mock = ["mockall"]
diff --git a/global/src/lib.rs b/global/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ddfd94679f6915e09d45d5810932cfa92ab378b9
--- /dev/null
+++ b/global/src/lib.rs
@@ -0,0 +1,155 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+#![deny(
+    clippy::unwrap_used,
+    missing_copy_implementations,
+    trivial_casts,
+    trivial_numeric_casts,
+    unstable_features,
+    unused_import_braces
+)]
+
+pub use tokio;
+
+use async_rwlock::RwLock;
+use dubp::wallet::prelude::SourceAmount;
+use duniter_dbs::BlockMetaV2;
+use once_cell::sync::OnceCell;
+use std::ops::Deref;
+
+pub static SELF_ENDPOINTS: RwLock<Option<Vec<String>>> = RwLock::new(None);
+
+static ASYNC_RUNTIME: OnceCell<tokio::runtime::Runtime> = OnceCell::new();
+static CURRENT_META: RwLock<Option<CurrentMeta>> = RwLock::new(None);
+static SELF_PEER_OLD: RwLock<Option<duniter_dbs::PeerCardDbV1>> = RwLock::new(None);
+
+#[derive(Clone, Copy, Debug, Default)]
+pub struct CurrentMeta {
+    pub current_ud: SourceAmount,
+    pub current_block_meta: BlockMetaV2,
+}
+
+#[derive(Clone, Debug)]
+pub enum GlobalBackGroundTaskMsg {
+    InitCurrentMeta(CurrentMeta),
+    NewCurrentBlock(BlockMetaV2),
+    GetSelfEndpoints(flume::Sender<Option<Vec<String>>>),
+    SetSelfPeerOld(duniter_dbs::PeerCardDbV1),
+}
+
+pub async fn start_global_background_task(recv: flume::Receiver<GlobalBackGroundTaskMsg>) {
+    tokio::spawn(async move {
+        while let Ok(msg) = recv.recv_async().await {
+            match msg {
+                GlobalBackGroundTaskMsg::InitCurrentMeta(current_meta) => {
+                    let mut write_guard = CURRENT_META.write().await;
+                    write_guard.replace(current_meta);
+                }
+                GlobalBackGroundTaskMsg::NewCurrentBlock(current_block_meta) => {
+                    let upgradable_read_guard = CURRENT_META.upgradable_read().await;
+                    let new_current_meta = if let Some(dividend) = current_block_meta.dividend {
+                        CurrentMeta {
+                            current_ud: dividend,
+                            current_block_meta,
+                        }
+                    } else if let Some(current_meta) = upgradable_read_guard.deref() {
+                        CurrentMeta {
+                            current_ud: current_meta.current_ud,
+                            current_block_meta,
+                        }
+                    } else {
+                        CurrentMeta {
+                            current_ud: SourceAmount::ZERO,
+                            current_block_meta,
+                        }
+                    };
+                    let mut write_guard =
+                        async_rwlock::RwLockUpgradableReadGuard::upgrade(upgradable_read_guard)
+                            .await;
+                    write_guard.replace(new_current_meta);
+                }
+                GlobalBackGroundTaskMsg::GetSelfEndpoints(sender) => {
+                    let read_guard = SELF_ENDPOINTS.read().await;
+                    let _ = sender.send_async(read_guard.deref().clone()).await;
+                }
+                GlobalBackGroundTaskMsg::SetSelfPeerOld(self_peer_old) => {
+                    let mut write_guard = SELF_PEER_OLD.write().await;
+                    write_guard.replace(self_peer_old);
+                }
+            }
+        }
+    });
+}
+
+pub fn get_async_runtime() -> &'static tokio::runtime::Runtime {
+    ASYNC_RUNTIME.get_or_init(|| {
+        tokio::runtime::Builder::new_multi_thread()
+            .enable_all()
+            .build()
+            .expect("fail to build tokio runtime")
+    })
+}
+
+#[derive(Clone, Copy, Debug, Default)]
+pub struct AsyncAccessor;
+
+impl AsyncAccessor {
+    pub fn new() -> Self {
+        AsyncAccessor
+    }
+    pub async fn get_current_meta<D: 'static, F: 'static + FnOnce(&CurrentMeta) -> D>(
+        &self,
+        f: F,
+    ) -> Option<D> {
+        let read_guard = CURRENT_META.read().await;
+        if let Some(current_meta) = read_guard.deref() {
+            Some(f(current_meta))
+        } else {
+            None
+        }
+    }
+    pub async fn get_self_peer_old<
+        D: 'static,
+        F: 'static + FnOnce(&duniter_dbs::PeerCardDbV1) -> D,
+    >(
+        &self,
+        f: F,
+    ) -> Option<D> {
+        let read_guard = SELF_PEER_OLD.read().await;
+        if let Some(self_peer_old) = read_guard.deref() {
+            Some(f(self_peer_old))
+        } else {
+            None
+        }
+    }
+}
+
+#[cfg(feature = "mock")]
+mockall::mock! {
+    pub AsyncAccessor {
+        pub async fn get_current_meta<D: 'static, F: 'static + FnOnce(&CurrentMeta) -> D>(
+            &self,
+            f: F,
+        ) -> Option<D>;
+        pub async fn get_self_peer_old<
+            D: 'static,
+            F: 'static + FnOnce(&duniter_dbs::PeerCardDbV1) -> D,
+        >(
+            &self,
+            f: F,
+        ) -> Option<D>;
+    }
+}
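
A minimal sketch of how a caller might drive the background task defined above: it starts the task on the shared runtime and queries the self endpoints through the message channel. Only the `flume` channel construction is assumed beyond the items shown here:

```rust
use duniter_global::{get_async_runtime, start_global_background_task, GlobalBackGroundTaskMsg};

fn main() {
    get_async_runtime().block_on(async {
        let (sender, receiver) = flume::unbounded();
        start_global_background_task(receiver).await;

        // Endpoints stay `None` until a module has published them.
        let (reply_sender, reply_receiver) = flume::bounded(1);
        sender
            .send_async(GlobalBackGroundTaskMsg::GetSelfEndpoints(reply_sender))
            .await
            .expect("background task stopped");
        let endpoints = reply_receiver.recv_async().await.expect("no reply");
        println!("self endpoints: {:?}", endpoints);
    });
}
```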
diff --git a/mempools/Cargo.toml b/mempools/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..1da4c92ae87f2e4ed5fb002005fd1a7bd201d242
--- /dev/null
+++ b/mempools/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "duniter-mempools"
+version = "0.1.0"
+authors = ["elois <elois@duniter.org>"]
+description = "Duniter mempools"
+repository = "https://git.duniter.org/nodes/typescript/duniter"
+keywords = ["dubp", "duniter", "blockchain", "mempool"]
+license = "AGPL-3.0"
+edition = "2018"
+
+[lib]
+path = "src/lib.rs"
+
+[dependencies]
+dubp = { version = "0.51.0", features = ["duniter"] }
+duniter-dbs = { path = "../dbs" }
+duniter-bc-reader = { path = "../bc-reader" }
+duniter-dbs-write-ops = { path = "../dbs-write-ops" }
+log = "0.4.11"
+thiserror = "1.0.20"
+
+[dev-dependencies]
diff --git a/mempools/src/lib.rs b/mempools/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..8a8870aae42202d6ebf28b21f9486d2337ed5ef1
--- /dev/null
+++ b/mempools/src/lib.rs
@@ -0,0 +1,133 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+#![deny(
+    clippy::unwrap_used,
+    missing_copy_implementations,
+    trivial_casts,
+    trivial_numeric_casts,
+    unstable_features,
+    unused_import_braces
+)]
+
+use std::borrow::Cow;
+
+use dubp::common::crypto::keys::ed25519::PublicKey;
+use dubp::documents::prelude::*;
+use dubp::documents::transaction::TransactionDocumentV10;
+use duniter_dbs::kv_typed::prelude::*;
+use duniter_dbs::{
+    databases::bc_v2::BcV2DbReadable,
+    databases::txs_mp_v2::{TxsMpV2Db, TxsMpV2DbReadable},
+};
+use thiserror::Error;
+
+#[derive(Clone, Copy, Debug, Default)]
+pub struct Mempools {
+    pub txs: TxsMempool,
+}
+
+#[derive(Debug, Error)]
+pub enum TxMpError {
+    #[error("{0}")]
+    Db(KvError),
+    #[error("Mempool full")]
+    Full,
+    #[error("Transaction already written in blockchain")]
+    TxAlreadyWritten,
+}
+
+impl From<KvError> for TxMpError {
+    fn from(e: KvError) -> Self {
+        TxMpError::Db(e)
+    }
+}
+
+#[derive(Clone, Copy, Debug, Default)]
+pub struct TxsMempool {
+    max_size: usize,
+}
+
+impl TxsMempool {
+    pub fn new(max_size: usize) -> Self {
+        TxsMempool { max_size }
+    }
+    pub fn accept_new_tx<BcDb: BcV2DbReadable, TxsMpDb: TxsMpV2DbReadable>(
+        &self,
+        bc_db_ro: &BcDb,
+        server_pubkey: PublicKey,
+        tx: TransactionDocumentV10,
+        txs_mp_db_ro: &TxsMpDb,
+    ) -> Result<(), TxMpError> {
+        if duniter_bc_reader::tx_exist(bc_db_ro, tx.get_hash())? {
+            Err(TxMpError::TxAlreadyWritten)
+        } else if tx.issuers().contains(&server_pubkey)
+            || txs_mp_db_ro.txs().count()? < self.max_size
+        {
+            Ok(())
+        } else {
+            Err(TxMpError::Full)
+        }
+    }
+
+    pub fn add_pending_tx<B: Backend, BcDb: BcV2DbReadable>(
+        &self,
+        bc_db_ro: &BcDb,
+        server_pubkey: PublicKey,
+        txs_mp_db: &TxsMpV2Db<B>,
+        tx: &TransactionDocumentV10,
+    ) -> Result<(), TxMpError> {
+        if duniter_bc_reader::tx_exist(bc_db_ro, tx.get_hash())? {
+            Err(TxMpError::TxAlreadyWritten)
+        } else if tx.issuers().contains(&server_pubkey) {
+            duniter_dbs_write_ops::txs_mp::add_pending_tx(
+                |_, _| Ok(()),
+                txs_mp_db,
+                Cow::Borrowed(tx),
+            )?;
+            Ok(())
+        } else {
+            duniter_dbs_write_ops::txs_mp::add_pending_tx(
+                |_tx, txs| {
+                    if txs.count()? >= self.max_size {
+                        Err(KvError::Custom(TxMpError::Full.into()))
+                    } else {
+                        Ok(())
+                    }
+                },
+                txs_mp_db,
+                Cow::Borrowed(tx),
+            )?;
+            Ok(())
+        }
+    }
+
+    #[doc(hidden)]
+    pub fn add_pending_tx_force<B: Backend>(
+        &self,
+        txs_mp_db: &TxsMpV2Db<B>,
+        tx: &TransactionDocumentV10,
+    ) -> KvResult<()> {
+        duniter_dbs_write_ops::txs_mp::add_pending_tx(|_, _| Ok(()), txs_mp_db, Cow::Borrowed(tx))?;
+        Ok(())
+    }
+
+    pub fn get_free_rooms<TxsMpDb: TxsMpV2DbReadable>(
+        &self,
+        txs_mp_db_ro: &TxsMpDb,
+    ) -> KvResult<usize> {
+        Ok(self.max_size - txs_mp_db_ro.txs().count()?)
+    }
+}
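
The admission rule of `accept_new_tx` can be read independently of the database types; the sketch below is a deliberately simplified stand-in, where plain booleans and counters replace the `duniter_bc_reader` and `TxsMpV2DbReadable` queries, and only illustrates the order of the checks:

```rust
/// Simplified illustration of the rule implemented by `TxsMempool::accept_new_tx`.
fn accept_new_tx_rule(
    tx_already_written: bool,   // stands in for `duniter_bc_reader::tx_exist(..)`
    issued_by_local_node: bool, // stands in for `tx.issuers().contains(&server_pubkey)`
    pending_count: usize,       // stands in for `txs_mp_db_ro.txs().count()?`
    max_size: usize,
) -> Result<(), &'static str> {
    if tx_already_written {
        Err("transaction already written in blockchain")
    } else if issued_by_local_node || pending_count < max_size {
        // Transactions issued by the local node bypass the size limit.
        Ok(())
    } else {
        Err("mempool full")
    }
}

fn main() {
    assert!(accept_new_tx_rule(false, false, 10, 100).is_ok());
    assert!(accept_new_tx_rule(false, true, 100, 100).is_ok()); // local issuer bypasses the limit
    assert!(accept_new_tx_rule(false, false, 100, 100).is_err()); // mempool full
    assert!(accept_new_tx_rule(true, true, 0, 100).is_err()); // already in the chain
}
```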
diff --git a/module/Cargo.toml b/module/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..b61f4707d7fb7df1a6b5bcf1ef91cf30cc9a7b80
--- /dev/null
+++ b/module/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "duniter-module"
+version = "0.1.0"
+authors = ["librelois <elois@duniter.org>"]
+license = "AGPL-3.0"
+edition = "2018"
+
+[dependencies]
+anyhow = "1.0.34"
+async-trait = "0.1.41"
+dubp = { version = "0.51.0", features = ["duniter"] }
+duniter-conf = { path = "../conf" }
+duniter-dbs = { path = "../dbs" }
+duniter-global = { path = "../global" }
+duniter-mempools = { path = "../mempools" }
+fast-threadpool = "0.2.3"
+log = "0.4"
+
+[dev-dependencies]
+duniter-dbs = { path = "../dbs", features = ["mem"] }
+paste = "1.0.2"
+tokio = { version = "1.2", features = ["macros", "rt"] }
diff --git a/module/src/lib.rs b/module/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..79060eabac9e3dd473b0a592bf5565ef9ffa2fda
--- /dev/null
+++ b/module/src/lib.rs
@@ -0,0 +1,344 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+#![deny(
+    clippy::unwrap_used,
+    missing_copy_implementations,
+    trivial_casts,
+    trivial_numeric_casts,
+    unstable_features,
+    unused_import_braces
+)]
+
+use dubp::{
+    block::DubpBlockV10,
+    common::prelude::{BlockNumber, Blockstamp},
+    crypto::{hashs::Hash, keys::ed25519::PublicKey},
+    documents::transaction::TransactionDocumentV10,
+};
+use duniter_conf::{DuniterConf, DuniterMode};
+use duniter_dbs::{kv_typed::prelude::*, FileBackend, SharedDbs};
+use duniter_mempools::Mempools;
+use std::path::Path;
+
+pub const SOFTWARE_NAME: &str = "duniter";
+
+pub type Endpoint = String;
+
+#[async_trait::async_trait]
+pub trait DuniterModule: 'static + Sized {
+    const INDEX_BLOCKS: bool = false;
+
+    /// This function is called only if `Self::INDEX_BLOCKS` is `true`;
+    /// in that case it must be reimplemented, because the default implementation panics.
+    fn apply_block(
+        _block: &DubpBlockV10,
+        _conf: &DuniterConf,
+        _profile_path_opt: Option<&Path>,
+    ) -> KvResult<()> {
+        unreachable!()
+    }
+
+    /// This function is called only if `Self::INDEX_BLOCKS` is `true`;
+    /// in that case it must be reimplemented, because the default implementation panics.
+    fn revert_block(
+        _block: &DubpBlockV10,
+        _conf: &DuniterConf,
+        _profile_path_opt: Option<&Path>,
+    ) -> KvResult<()> {
+        unreachable!()
+    }
+
+    fn init(
+        conf: &DuniterConf,
+        currency: &str,
+        dbs_pool: &fast_threadpool::ThreadPoolAsyncHandler<SharedDbs<FileBackend>>,
+        mempools: Mempools,
+        mode: DuniterMode,
+        profile_path_opt: Option<&Path>,
+        software_version: &'static str,
+    ) -> anyhow::Result<(Self, Vec<Endpoint>)>;
+
+    async fn start(self) -> anyhow::Result<()>;
+
+    // Needed for BMA only, will be removed when the migration is complete.
+    #[doc(hidden)]
+    fn get_transactions_history_for_bma(
+        _dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
+        _profile_path_opt: Option<&Path>,
+        _pubkey: PublicKey,
+    ) -> KvResult<Option<TxsHistoryForBma>> {
+        Ok(None)
+    }
+    // Needed for BMA only, will be removed when the migration is complete.
+    #[doc(hidden)]
+    fn get_tx_by_hash(
+        _dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
+        _hash: Hash,
+        _profile_path_opt: Option<&Path>,
+    ) -> KvResult<Option<(TransactionDocumentV10, Option<BlockNumber>)>> {
+        Ok(None)
+    }
+}
+
+// Needed for BMA only, will be removed when the migration is complete.
+#[doc(hidden)]
+#[derive(Default)]
+pub struct TxsHistoryForBma {
+    pub sent: Vec<(TransactionDocumentV10, Blockstamp, i64)>,
+    pub received: Vec<(TransactionDocumentV10, Blockstamp, i64)>,
+    pub sending: Vec<TransactionDocumentV10>,
+    pub pending: Vec<TransactionDocumentV10>,
+}
+
+#[macro_export]
+macro_rules! plug_duniter_modules {
+    ([$($M:ty),*], $TxsHistoryForBma:ident) => {
+        paste::paste! {
+            use anyhow::Context as _;
+            #[allow(dead_code)]
+            fn apply_block_modules(
+                block: Arc<DubpBlockV10>,
+                conf: Arc<duniter_conf::DuniterConf>,
+                dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
+                profile_path_opt: Option<std::path::PathBuf>,
+            ) -> KvResult<()> {
+                $(
+                    let [<$M:snake>] = if <$M>::INDEX_BLOCKS {
+                        let block_arc_clone = Arc::clone(&block);
+                        let conf_arc_clone = Arc::clone(&conf);
+                        let profile_path_opt_clone = profile_path_opt.clone();
+                        Some(dbs_pool
+                        .launch(move |_| <$M>::apply_block(
+                            &block_arc_clone,
+                            &conf_arc_clone,
+                            profile_path_opt_clone.as_deref()
+                        ))
+                        .expect("thread pool disconnected"))
+                    } else {
+                        None
+                    };
+                )*
+                $(
+                    if let Some(join_handle) = [<$M:snake>] {
+                        join_handle.join().expect("thread pool disconnected")?;
+                    }
+                )*
+                Ok(())
+            }
+            #[allow(dead_code)]
+            fn apply_chunk_of_blocks_modules(
+                blocks: Arc<[DubpBlockV10]>,
+                conf: Arc<duniter_conf::DuniterConf>,
+                dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
+                profile_path_opt: Option<std::path::PathBuf>,
+            ) -> KvResult<()> {
+                $(
+                    let [<$M:snake>] = if <$M>::INDEX_BLOCKS {
+                        let blocks_arc_clone = Arc::clone(&blocks);
+                        let conf_arc_clone = Arc::clone(&conf);
+                        let profile_path_opt_clone = profile_path_opt.clone();
+                        Some(dbs_pool
+                            .launch(move |_| {
+                                use std::ops::Deref as _;
+                                for block in blocks_arc_clone.deref() {
+                                    <$M>::apply_block(&block, &conf_arc_clone, profile_path_opt_clone.as_deref())?;
+                                }
+                                Ok::<_, KvError>(())
+                            })
+                            .expect("thread pool disconnected"))
+                    } else {
+                        None
+                    };
+                )*
+                $(
+                    if let Some(join_handle) = [<$M:snake>] {
+                        join_handle.join().expect("thread pool disconnected")?;
+                    }
+                )*
+                Ok(())
+            }
+            #[allow(dead_code)]
+            fn revert_block_modules(
+                block: Arc<DubpBlockV10>,
+                conf: Arc<duniter_conf::DuniterConf>,
+                dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
+                profile_path_opt: Option<std::path::PathBuf>,
+            ) -> KvResult<()> {
+                $(
+                    let [<$M:snake>] = if <$M>::INDEX_BLOCKS {
+                        let block_arc_clone = Arc::clone(&block);
+                        let conf_arc_clone = Arc::clone(&conf);
+                        let profile_path_opt_clone = profile_path_opt.clone();
+                        Some(dbs_pool
+                        .launch(move |_| <$M>::revert_block(
+                            &block_arc_clone,
+                            &conf_arc_clone,
+                            profile_path_opt_clone.as_deref()
+                        ))
+                        .expect("thread pool disconnected"))
+                    } else {
+                        None
+                    };
+                )*
+                $(
+                    if let Some(join_handle) = [<$M:snake>] {
+                        join_handle.join().expect("thread pool disconnected")?;
+                    }
+                )*
+                Ok(())
+            }
+            async fn start_duniter_modules(
+                conf: &DuniterConf,
+                currency: String,
+                dbs_pool: fast_threadpool::ThreadPoolAsyncHandler<SharedDbs<FileBackend>>,
+                mempools: duniter_mempools::Mempools,
+                mode: DuniterMode,
+                profile_path_opt: Option<std::path::PathBuf>,
+                software_version: &'static str,
+            ) -> anyhow::Result<()> {
+                let mut all_endpoints = Vec::<String>::new();
+                $(
+                    let ([<$M:snake>], mut endpoints) = <$M>::init(conf, &currency, &dbs_pool, mempools, mode, profile_path_opt.as_deref(), software_version)
+                        .with_context(|| format!("Fail to init module '{}'", stringify!($M)))?;
+                    all_endpoints.append(&mut endpoints);
+                )*
+
+                log::info!("TMP DEBUG SELF_ENDPOINTS={:?}", all_endpoints);
+                duniter_global::SELF_ENDPOINTS.write().await.replace(all_endpoints);
+
+                $(
+                    let [<$M:snake _handle>] = tokio::spawn([<$M:snake>].start());
+                )*
+
+                $(
+                    [<$M:snake _handle>].await.map_err(|e| if e.is_cancelled() {
+                        anyhow::Error::msg(format!("Module '{}' cancelled", stringify!($M)))
+                    } else {
+                        anyhow::Error::msg(format!("Module '{}' panic", stringify!($M)))
+                    })?
+                    .with_context(|| format!("Error on execution of module '{}'", stringify!($M)))?;
+                )*
+
+                Ok(())
+            }
+
+            // Needed for BMA only, will be removed when the migration is complete.
+            #[allow(dead_code)]
+            #[doc(hidden)]
+            fn get_transactions_history_for_bma(
+                dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
+                profile_path_opt: Option<&Path>,
+                pubkey: PublicKey,
+            ) -> KvResult<TxsHistoryForBma> {
+                $(
+                    if let Some(txs_history) = <$M>::get_transactions_history_for_bma(dbs_pool, profile_path_opt, pubkey)? {
+                        return Ok(txs_history);
+                    }
+                )*
+                Ok(TxsHistoryForBma::default())
+            }
+            // Needed for BMA only, will be removed when the migration is complete.
+            #[allow(dead_code)]
+            #[doc(hidden)]
+            fn get_tx_by_hash(
+                dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
+                hash: Hash,
+                profile_path_opt: Option<&Path>,
+            ) -> KvResult<Option<(TransactionDocumentV10, Option<BlockNumber>)>> {
+                $(
+                    if let Some(tx_with_wb) = <$M>::get_tx_by_hash(dbs_pool, hash, profile_path_opt)? {
+                        return Ok(Some(tx_with_wb));
+                    }
+                )*
+                Ok(None)
+            }
+        }
+    };
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use duniter_mempools::TxsMempool;
+
+    struct TestMod1;
+
+    #[async_trait::async_trait]
+    impl DuniterModule for TestMod1 {
+        fn init(
+            _conf: &DuniterConf,
+            _currency: &str,
+            _dbs_pool: &fast_threadpool::ThreadPoolAsyncHandler<SharedDbs<FileBackend>>,
+            _mempools: Mempools,
+            _mode: DuniterMode,
+            profile_path_opt: Option<&Path>,
+            _software_version: &'static str,
+        ) -> anyhow::Result<(Self, Vec<Endpoint>)> {
+            if let Some(profile_path) = profile_path_opt {
+                let _file_path = profile_path.join("test_mod1.json");
+            }
+            Ok((TestMod1, vec![]))
+        }
+
+        async fn start(self) -> anyhow::Result<()> {
+            Ok(())
+        }
+    }
+
+    struct TestMod2;
+
+    #[async_trait::async_trait]
+    impl DuniterModule for TestMod2 {
+        fn init(
+            _conf: &DuniterConf,
+            _currency: &str,
+            _dbs_pool: &fast_threadpool::ThreadPoolAsyncHandler<SharedDbs<FileBackend>>,
+            _mempools: Mempools,
+            _mode: DuniterMode,
+            _profile_path_opt: Option<&Path>,
+            _software_version: &'static str,
+        ) -> anyhow::Result<(Self, Vec<Endpoint>)> {
+            Ok((TestMod2, vec![]))
+        }
+
+        async fn start(self) -> anyhow::Result<()> {
+            Ok(())
+        }
+    }
+
+    #[tokio::test]
+    async fn test_macro_plug_duniter_modules() -> anyhow::Result<()> {
+        plug_duniter_modules!([TestMod1, TestMod2], TxsHistoryForBma);
+
+        let dbs = SharedDbs::mem()?;
+        let threadpool =
+            fast_threadpool::ThreadPool::start(fast_threadpool::ThreadPoolConfig::default(), dbs);
+
+        start_duniter_modules(
+            &DuniterConf::default(),
+            "test".to_owned(),
+            threadpool.into_async_handler(),
+            Mempools {
+                txs: TxsMempool::new(0),
+            },
+            DuniterMode::Sync,
+            None,
+            "",
+        )
+        .await?;
+        Ok(())
+    }
+}
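
Beyond the minimal test modules above, a module that opts into block indexing sets `INDEX_BLOCKS` and overrides the two block hooks. The sketch below mirrors the imports of this file; the exact re-export path for `KvResult` and the module itself are illustrative assumptions:

```rust
use std::path::Path;

use dubp::block::DubpBlockV10;
use duniter_conf::{DuniterConf, DuniterMode};
use duniter_dbs::{kv_typed::prelude::KvResult, FileBackend, SharedDbs};
use duniter_mempools::Mempools;
use duniter_module::{DuniterModule, Endpoint};

/// Hypothetical module that asks the core to feed it every applied block.
struct BlockIndexerMod;

#[async_trait::async_trait]
impl DuniterModule for BlockIndexerMod {
    // Opting in means `apply_block`/`revert_block` are called by the
    // `plug_duniter_modules!` glue instead of the panicking defaults.
    const INDEX_BLOCKS: bool = true;

    fn apply_block(
        _block: &DubpBlockV10,
        _conf: &DuniterConf,
        _profile_path_opt: Option<&Path>,
    ) -> KvResult<()> {
        // Index whatever this module needs from `_block` here.
        Ok(())
    }

    fn revert_block(
        _block: &DubpBlockV10,
        _conf: &DuniterConf,
        _profile_path_opt: Option<&Path>,
    ) -> KvResult<()> {
        // Undo the indexing performed in `apply_block`.
        Ok(())
    }

    fn init(
        _conf: &DuniterConf,
        _currency: &str,
        _dbs_pool: &fast_threadpool::ThreadPoolAsyncHandler<SharedDbs<FileBackend>>,
        _mempools: Mempools,
        _mode: DuniterMode,
        _profile_path_opt: Option<&Path>,
        _software_version: &'static str,
    ) -> anyhow::Result<(Self, Vec<Endpoint>)> {
        Ok((BlockIndexerMod, vec![]))
    }

    async fn start(self) -> anyhow::Result<()> {
        Ok(())
    }
}
```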
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..aaf82a40fdae6d0b2951ea919113c28f2b25a299
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,21 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+pub use duniter_conf as conf;
+pub use duniter_dbs as dbs;
+pub use duniter_dbs_write_ops as dbs_write_ops;
+pub use duniter_global as global;
+pub use duniter_mempools as mempools;
+pub use duniter_module as module;
diff --git a/tools/kv_typed/Cargo.toml b/tools/kv_typed/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..6e0eacd9f646445cdb1de266daf9705d44c88570
--- /dev/null
+++ b/tools/kv_typed/Cargo.toml
@@ -0,0 +1,53 @@
+[package]
+name = "kv_typed"
+version = "0.1.0"
+authors = ["elois <c@elo.tf>"]
+description = "Strongly typed key-value storage"
+repository = "https://git.duniter.org/nodes/typescript/duniter"
+keywords = ["database", "key", "sled"]
+license = "AGPL-3.0"
+edition = "2018"
+
+[lib]
+path = "src/lib.rs"
+
+[dependencies]
+byteorder = "1.3.4"
+cfg-if = "0.1.10"
+flume = "0.10.0"
+leveldb_minimal = { version = "0.1.0", optional = true }
+lmdb-zero = { version = "0.4.4", optional = true }
+parking_lot = "0.11.0"
+paste = "1.0.2"
+rayon = { version = "1.3.1", optional = true }
+regex = { version = "1.3.9", optional = true }
+serde_json = { version = "1.0.53", optional = true }
+sled = { version = "0.34.6", optional = true, features = ["compression"] }
+smallvec = { version = "1.4.0", features = ["serde", "write"] }
+thiserror = "1.0.20"
+uninit = "0.4.0"
+zerocopy = "0.3.0"
+
+[[bench]]
+name = "compare_backends"
+harness = false
+required-features = ["leveldb_backend", "sled_backend"]
+
+[dev-dependencies]
+async-std = { version = "1.6.3", features = ["attributes"] }
+maybe-async = "0.2.0"
+smallvec = { version = "1.4.0", features = ["serde", "write"] }
+tempfile = "3.2.0"
+unwrap = "1.2.1"
+
+# Benches dependencies
+criterion = { version = "0.3.1" }
+
+[features]
+default = ["leveldb_backend", "sled_backend"]
+
+async = []
+explorer = ["rayon", "regex", "serde_json"]
+leveldb_backend = ["leveldb_minimal"]
+lmdb_backend = ["lmdb-zero"]
+sled_backend = ["sled"]
diff --git a/tools/kv_typed/benches/compare_backends.rs b/tools/kv_typed/benches/compare_backends.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d21049b49bfd48fc0a26f5e2e5b5e3c910ec6d5c
--- /dev/null
+++ b/tools/kv_typed/benches/compare_backends.rs
@@ -0,0 +1,146 @@
+use criterion::{criterion_group, criterion_main, Criterion, /*, AxisScale, PlotConfiguration*/};
+use kv_typed::prelude::*;
+use std::{fmt::Debug, path::PathBuf};
+
+kv_typed::db_schema!(Test, [["c1", Col1, u32, String],]);
+//const LEVELDB_DIR_PATH: &str = "/dev/shm/kv_typed/benches/compare_backends/leveldb";
+//const LMDB_DIR_PATH: &str = "/dev/shm/kv_typed/benches/compare_backends/lmdb";
+const LEVELDB_DIR_PATH: &str = "/home/elois/tmp/kv_typed/benches/compare_backends/leveldb";
+const LMDB_DIR_PATH: &str = "/home/elois/tmp/kv_typed/benches/compare_backends/lmdb";
+const SLED_DIR_PATH: &str = "/home/elois/tmp/kv_typed/benches/compare_backends/sled";
+static SMALL_VAL: &str = "abcdefghijklmnopqrst";
+static LARGE_VAL: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
+fn read_n_entries<B: Backend>(db: &TestDb<B>, n: u32, val: String) {
+    for i in 0..n {
+        assert_eq!(db.col1().get(&i).expect("db err"), Some(val.clone()));
+    }
+    /*db.col1().iter(.., |iter| {
+        let mut iter = iter.values();
+        for _ in 0..n {
+            assert_eq!(iter.next_res().expect(""), Some(val.clone()));
+            //assert_eq!(db.col1().get(&i).expect(""), Some(val.clone()));
+        }
+        assert_eq!(iter.next_res().expect(""), None);
+    });*/
+}
+fn remove_and_write_n_entries<B: Backend>(db: &TestDb<B>, n: u32, val: String) {
+    for i in 0..n {
+        db.col1_write().remove(i).expect("fail to write");
+        db.col1_write()
+            .upsert(i, val.clone())
+            .expect("fail to write");
+    }
+}
+fn write_n_entries<B: Backend>(db: &TestDb<B>, n: u32, val: String) {
+    for i in 0..n {
+        db.col1_write()
+            .upsert(i, val.clone())
+            .expect("fail to write");
+    }
+}
+
+pub fn benchmark(c: &mut Criterion) {
+    // Read chart config
+    //let read_chart_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
+
+    // Create DBs
+    std::fs::create_dir_all(LEVELDB_DIR_PATH).expect("fail to create leveldb dir");
+    let leveldb_db = TestDb::<LevelDb>::open(LevelDbConf {
+        db_path: PathBuf::from(LEVELDB_DIR_PATH),
+        ..Default::default()
+    })
+    .expect("fail to open db");
+    /*let lmdb_db =
+    TestDb::<Lmdb>::open(LmdbConf::default().folder_path(PathBuf::from(LMDB_DIR_PATH)))
+        .expect("fail to open db");*/
+    //let mem_db = TestDb::<Mem>::open(MemConf::default()).expect("fail to open db");
+    let sled_db =
+        TestDb::<Sled>::open(SledConf::default().path(SLED_DIR_PATH)).expect("fail to open db");
+
+    // Test write small values
+    let mut group = c.benchmark_group("write small values");
+    /*group.bench_function("lmdb", |b| {
+        b.iter(|| remove_and_write_n_entries(&lmdb_db, 100, String::from(SMALL_VAL)))
+    });*/
+    group.bench_function("leveldb", |b| {
+        b.iter(|| remove_and_write_n_entries(&leveldb_db, 100, String::from(SMALL_VAL)))
+    });
+    /*group.bench_function("mem", |b| {
+        b.iter(|| remove_and_write_n_entries(&mem_db, 100, String::from(SMALL_VAL)))
+    });*/
+    group.bench_function("sled", |b| {
+        b.iter(|| remove_and_write_n_entries(&sled_db, 100, String::from(SMALL_VAL)))
+    });
+    group.finish();
+
+    // Prepare read test
+    //write_n_entries(&lmdb_db, 100, String::from(SMALL_VAL));
+    write_n_entries(&leveldb_db, 100, String::from(SMALL_VAL));
+    //write_n_entries(&mem_db, 100, String::from(SMALL_VAL));
+    write_n_entries(&sled_db, 100, String::from(SMALL_VAL));
+
+    // Test read small values
+    let mut group = c.benchmark_group("read small values");
+    //group.plot_config(read_chart_config.clone());
+    /*group.bench_function("lmdb", |b| {
+        b.iter(|| read_n_entries(&lmdb_db, 100, String::from(SMALL_VAL)))
+    });*/
+    group.bench_function("leveldb", |b| {
+        b.iter(|| read_n_entries(&leveldb_db, 100, String::from(SMALL_VAL)))
+    });
+    /*group.bench_function("mem", |b| {
+        b.iter(|| read_n_entries(&mem_db, 100, String::from(SMALL_VAL)))
+    });*/
+    group.bench_function("sled", |b| {
+        b.iter(|| read_n_entries(&sled_db, 100, String::from(SMALL_VAL)))
+    });
+    group.finish();
+
+    // Test write large values
+    let mut group = c.benchmark_group("write large values");
+    /*group.bench_function("lmdb", |b| {
+        b.iter(|| remove_and_write_n_entries(&lmdb_db, 100, String::from(LARGE_VAL)))
+    });*/
+    group.bench_function("leveldb", |b| {
+        b.iter(|| remove_and_write_n_entries(&leveldb_db, 100, String::from(LARGE_VAL)))
+    });
+    /*group.bench_function("mem", |b| {
+        b.iter(|| remove_and_write_n_entries(&mem_db, 100, String::from(LARGE_VAL)))
+    });*/
+    group.bench_function("sled", |b| {
+        b.iter(|| remove_and_write_n_entries(&sled_db, 100, String::from(LARGE_VAL)))
+    });
+    group.finish();
+
+    // Prepare read test
+    //write_n_entries(&lmdb_db, 100, String::from(LARGE_VAL));
+    write_n_entries(&leveldb_db, 100, String::from(LARGE_VAL));
+    //write_n_entries(&mem_db, 100, String::from(LARGE_VAL));
+    write_n_entries(&sled_db, 100, String::from(LARGE_VAL));
+
+    // Test read large values
+    let mut group = c.benchmark_group("read large values");
+    //group.plot_config(read_chart_config);
+    /*group.bench_function("lmdb", |b| {
+        b.iter(|| read_n_entries(&lmdb_db, 100, String::from(LARGE_VAL)))
+    });*/
+    group.bench_function("leveldb", |b| {
+        b.iter(|| read_n_entries(&leveldb_db, 100, String::from(LARGE_VAL)))
+    });
+    /*group.bench_function("mem", |b| {
+        b.iter(|| read_n_entries(&mem_db, 100, String::from(LARGE_VAL)))
+    });*/
+    group.bench_function("sled", |b| {
+        b.iter(|| read_n_entries(&sled_db, 100, String::from(LARGE_VAL)))
+    });
+    group.finish();
+
+    // Close DBs
+    std::fs::remove_dir_all(LEVELDB_DIR_PATH).expect("fail to remove leveldb dir");
+    std::fs::remove_dir_all(LMDB_DIR_PATH).expect("fail to remove lmdb dir");
+    std::fs::remove_dir_all(SLED_DIR_PATH).expect("fail to remove sled dir");
+}
+
+criterion_group!(benches, benchmark);
+criterion_main!(benches);
diff --git a/tools/kv_typed/src/as_bytes.rs b/tools/kv_typed/src/as_bytes.rs
new file mode 100644
index 0000000000000000000000000000000000000000..46e0e3f9f5ac1a451d2c89bace8a5bed76a820b3
--- /dev/null
+++ b/tools/kv_typed/src/as_bytes.rs
@@ -0,0 +1,86 @@
+use crate::*;
+
+pub trait AsBytes {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, f: F) -> T;
+}
+
+impl AsBytes for () {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(&[])
+    }
+}
+
+impl AsBytes for String {
+    fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+        f(self.as_bytes())
+    }
+}
+
+impl<T> AsBytes for Vec<T>
+where
+    T: zerocopy::AsBytes,
+{
+    fn as_bytes<D, F: FnMut(&[u8]) -> D>(&self, mut f: F) -> D {
+        use zerocopy::AsBytes as _;
+        f((&self[..]).as_bytes())
+    }
+}
+
+macro_rules! impl_as_bytes_for_smallvec {
+    ($($N:literal),*) => {$(
+        impl<T> AsBytes for SmallVec<[T; $N]>
+        where
+            T: zerocopy::AsBytes,
+        {
+            fn as_bytes<D, F: FnMut(&[u8]) -> D>(&self, mut f: F) -> D {
+                use zerocopy::AsBytes as _;
+                f((&self[..]).as_bytes())
+            }
+        }
+    )*};
+}
+impl_as_bytes_for_smallvec!(1, 2, 4, 8, 16, 32, 64);
+
+impl<T> AsBytes for BTreeSet<T>
+where
+    T: zerocopy::AsBytes + Copy,
+{
+    fn as_bytes<D, F: FnMut(&[u8]) -> D>(&self, mut f: F) -> D {
+        use zerocopy::AsBytes as _;
+        f((&self.iter().copied().collect::<SmallVec<[T; 32]>>()[..]).as_bytes())
+    }
+}
+
+impl<T> AsBytes for HashSet<T>
+where
+    T: zerocopy::AsBytes + Copy,
+{
+    fn as_bytes<D, F: FnMut(&[u8]) -> D>(&self, mut f: F) -> D {
+        use zerocopy::AsBytes as _;
+        f((&self.iter().copied().collect::<SmallVec<[T; 32]>>()[..]).as_bytes())
+    }
+}
+
+macro_rules! impl_as_bytes_for_le_numbers {
+    ($($T:ty),*) => {$(
+        impl AsBytes for $T {
+            fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+                f(&self.to_le_bytes()[..])
+            }
+        }
+    )*};
+}
+impl_as_bytes_for_le_numbers!(
+    usize, u8, u16, u32, u64, u128, isize, i8, i16, i32, i64, i128, f32, f64
+);
+
+macro_rules! impl_as_bytes_for_be_numbers {
+    ($($T:ty),*) => {$(
+        impl AsBytes for $T {
+            fn as_bytes<T, F: FnMut(&[u8]) -> T>(&self, mut f: F) -> T {
+                f(&self.0.to_be_bytes()[..])
+            }
+        }
+    )*};
+}
+impl_as_bytes_for_be_numbers!(U32BE, U64BE);
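
The callback-based encoding can be exercised directly. The sketch assumes that `AsBytes` and the `U32BE` wrapper are re-exported by the crate's prelude and that `U32BE` is a public tuple struct, as the macros above suggest:

```rust
use kv_typed::prelude::*;

fn main() {
    // `AsBytes` hands the encoded bytes to a callback instead of returning an
    // owned buffer, so backends can borrow them without extra allocation.
    42u32.as_bytes(|bytes| assert_eq!(bytes, &42u32.to_le_bytes()[..]));

    // Plain integers are encoded little-endian; the `U32BE`/`U64BE` wrappers
    // are big-endian, which keeps numeric order and lexicographic byte order
    // consistent for keys.
    U32BE(42).as_bytes(|bytes| assert_eq!(bytes, &42u32.to_be_bytes()[..]));
}
```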
diff --git a/tools/kv_typed/src/backend.rs b/tools/kv_typed/src/backend.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5e91cc21d5e569bf2314c533f25e4bc56861b7b5
--- /dev/null
+++ b/tools/kv_typed/src/backend.rs
@@ -0,0 +1,100 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed Backend Trait
+
+#[cfg(feature = "leveldb_backend")]
+pub mod leveldb;
+#[cfg(feature = "lmdb_backend")]
+pub mod lmdb;
+pub mod memory;
+pub mod memory_singleton;
+#[cfg(feature = "mock")]
+pub mod mock;
+#[cfg(feature = "sled_backend")]
+pub mod sled;
+
+use crate::*;
+
+pub trait Backend: 'static + Clone + Sized {
+    const NAME: &'static str;
+    type Col: BackendCol;
+    type Conf: Default;
+
+    fn open(conf: &Self::Conf) -> KvResult<Self>;
+    fn open_col(&mut self, conf: &Self::Conf, col_name: &str) -> KvResult<Self::Col>;
+}
+
+pub trait BackendCol: 'static + Clone + Debug + Send + Sync {
+    type Batch: BackendBatch;
+    type KeyBytes: KeyBytes;
+    type ValueBytes: ValueBytes;
+    type Iter: BackendIter<Self::KeyBytes, Self::ValueBytes>;
+
+    fn get<K: Key, V: Value>(&self, k: &K) -> KvResult<Option<V>>;
+    fn get_ref<K: Key, V: ValueZc, D, F: Fn(&V::Ref) -> KvResult<D>>(
+        &self,
+        k: &K,
+        f: F,
+    ) -> KvResult<Option<D>>;
+    fn get_ref_slice<K: Key, V: ValueSliceZc, D, F: Fn(&[V::Elem]) -> KvResult<D>>(
+        &self,
+        k: &K,
+        f: F,
+    ) -> KvResult<Option<D>>;
+    fn clear(&mut self) -> KvResult<()>;
+    fn contains_key<K: Key>(&self, k: &K) -> KvResult<bool>;
+    fn count(&self) -> KvResult<usize>;
+    fn iter<K: Key, V: Value>(&self, range: RangeBytes) -> Self::Iter;
+    fn iter_ref_slice<D, K, V, F>(
+        &self,
+        range: RangeBytes,
+        f: F,
+    ) -> KvInnerIterRefSlice<Self, D, K, V, F>
+    where
+        K: KeyZc,
+        V: ValueSliceZc,
+        F: FnMut(&K::Ref, &[V::Elem]) -> KvResult<D>,
+    {
+        KvInnerIterRefSlice {
+            backend_iter: self.iter::<K, V>(range),
+            f,
+            phantom: PhantomData,
+        }
+    }
+    fn put<K: Key, V: Value>(&mut self, k: &K, value: &V) -> KvResult<()>;
+    fn delete<K: Key>(&mut self, k: &K) -> KvResult<()>;
+    fn new_batch() -> Self::Batch;
+    fn write_batch(&mut self, inner_batch: Self::Batch) -> KvResult<()>;
+    fn save(&self) -> KvResult<()>;
+}
+
+pub trait BackendIter<K: KeyBytes, V: ValueBytes>:
+    Iterator<Item = Result<(K, V), DynErr>> + ReversableIterator
+{
+}
+
+#[cfg_attr(feature = "mock", mockall::automock)]
+pub trait BackendBatch: Debug + Default {
+    fn upsert(&mut self, k: &[u8], v: &[u8]);
+    fn remove(&mut self, k: &[u8]);
+}
+
+#[cfg(feature = "mock")]
+impl Debug for MockBackendBatch {
+    fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        unimplemented!()
+    }
+}
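
To show how the two traits fit together, here is a hedged sketch of a generic helper driving a backend directly (the `db_schema!` macro normally generates this plumbing); it relies only on the trait methods declared above plus the crate's `Key`/`Value` bounds:

```rust
use kv_typed::prelude::*;

/// Opens one column on any backend, writes a typed entry and reads it back.
fn put_and_get<B: Backend>(conf: &B::Conf, col_name: &str) -> KvResult<Option<String>> {
    let mut backend = B::open(conf)?;
    let mut col = backend.open_col(conf, col_name)?;

    // `put`/`get` are strongly typed: the key and value types decide how the
    // raw bytes are encoded and decoded.
    col.put(&42u32, &"hello".to_owned())?;
    col.get::<u32, String>(&42u32)
}
```

With the sled backend enabled this could be called as, for example, `put_and_get::<Sled>(&SledConf::default().temporary(true), "c1")`, assuming `SledConf` re-exports sled's `Config` as the benchmark above suggests.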
diff --git a/tools/kv_typed/src/backend/leveldb.rs b/tools/kv_typed/src/backend/leveldb.rs
new file mode 100644
index 0000000000000000000000000000000000000000..1ecd676c8a07ed59e73ac81c77eeacb08406a37f
--- /dev/null
+++ b/tools/kv_typed/src/backend/leveldb.rs
@@ -0,0 +1,364 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! LevelDb backend for KV Typed
+
+use crate::*;
+pub use leveldb_minimal::database::batch::{Batch as _, Writebatch as WriteBatch};
+use leveldb_minimal::database::cache::Cache as LevelDbCache;
+pub use leveldb_minimal::database::error::Error as LevelDbError;
+use leveldb_minimal::database::iterator::Iterator as LevelDbIterator;
+pub use leveldb_minimal::database::Database as LevelDbDb;
+use leveldb_minimal::iterator::{Iterable, LevelDBIterator as _};
+use leveldb_minimal::kv::KV as _;
+pub use leveldb_minimal::options::{Options as LevelDbOptions, ReadOptions, WriteOptions};
+use leveldb_minimal::Compression;
+use std::path::PathBuf;
+
+#[derive(Clone, Copy, Debug)]
+pub struct LevelDb;
+
+impl Backend for LevelDb {
+    const NAME: &'static str = "leveldb";
+    type Col = LevelDbCol;
+    type Conf = LevelDbConf;
+
+    fn open(_conf: &Self::Conf) -> KvResult<Self> {
+        Ok(LevelDb)
+    }
+    fn open_col(&mut self, conf: &Self::Conf, col_name: &str) -> KvResult<Self::Col> {
+        Ok(LevelDbCol(Arc::new(LevelDbDb::open(
+            &conf.db_path.join(col_name),
+            conf.clone().into(),
+        )?)))
+    }
+}
+
+#[derive(Clone)]
+pub struct LevelDbCol(Arc<LevelDbDb>);
+
+impl Debug for LevelDbCol {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("LevelDbCol")
+            .field("0", &"Arc<LevelDbDb>")
+            .finish()
+    }
+}
+
+#[derive(Default)]
+pub struct LevelDbBatch(WriteBatch);
+
+impl Debug for LevelDbBatch {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("LevelDbBatch")
+            .field("0", &"WriteBatch")
+            .finish()
+    }
+}
+
+impl BackendBatch for LevelDbBatch {
+    fn upsert(&mut self, k: &[u8], v: &[u8]) {
+        self.0.put(k, v)
+    }
+
+    fn remove(&mut self, k: &[u8]) {
+        self.0.delete(k)
+    }
+}
+
+#[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub struct LevelDbBytes(Vec<u8>);
+impl AsRef<[u8]> for LevelDbBytes {
+    fn as_ref(&self) -> &[u8] {
+        self.0.as_ref()
+    }
+}
+impl FromBytes for LevelDbBytes {
+    type Err = std::convert::Infallible;
+
+    fn from_bytes(bytes: &[u8]) -> Result<Self, Self::Err> {
+        Ok(Self(bytes.into()))
+    }
+}
+
+impl BackendCol for LevelDbCol {
+    type Batch = LevelDbBatch;
+    type KeyBytes = LevelDbBytes;
+    type ValueBytes = LevelDbBytes;
+    type Iter = LevelDbIter;
+
+    #[inline(always)]
+    fn new_batch() -> Self::Batch {
+        LevelDbBatch(WriteBatch::default())
+    }
+    fn clear(&mut self) -> KvResult<()> {
+        let keys = self
+            .0
+            .iter(ReadOptions::new())
+            .map(|(k, _v)| k)
+            .collect::<Vec<Vec<u8>>>();
+        for key in keys {
+            self.0.delete(WriteOptions::new(), key.as_ref())?;
+        }
+        Ok(())
+    }
+    #[inline(always)]
+    fn count(&self) -> KvResult<usize> {
+        Ok(self
+            .0
+            .iter(ReadOptions {
+                verify_checksums: false,
+                fill_cache: false,
+                snapshot: None,
+            })
+            .count())
+    }
+    #[inline(always)]
+    fn contains_key<K: Key>(&self, k: &K) -> KvResult<bool> {
+        k.as_bytes(|k_bytes| Ok(self.0.get(ReadOptions::new(), k_bytes)?.is_some()))
+    }
+    #[inline(always)]
+    fn get<K: Key, V: Value>(&self, k: &K) -> KvResult<Option<V>> {
+        k.as_bytes(|k_bytes| {
+            self.0
+                .get(ReadOptions::new(), k_bytes)?
+                .map(|bytes| V::from_bytes(&bytes).map_err(|e| KvError::DeserError(e.into())))
+                .transpose()
+        })
+    }
+    #[inline(always)]
+    fn get_ref<K: Key, V: ValueZc, D, F: Fn(&V::Ref) -> KvResult<D>>(
+        &self,
+        k: &K,
+        f: F,
+    ) -> KvResult<Option<D>> {
+        k.as_bytes(|k_bytes| {
+            self.0
+                .get(ReadOptions::new(), k_bytes)?
+                .map(|bytes| {
+                    if let Some(layout_verified) =
+                        zerocopy::LayoutVerified::<_, V::Ref>::new(bytes.as_ref())
+                    {
+                        f(&layout_verified)
+                    } else {
+                        Err(KvError::DeserError(
+                            "Bytes are invalid length or alignment.".into(),
+                        ))
+                    }
+                })
+                .transpose()
+        })
+    }
+    #[inline(always)]
+    fn get_ref_slice<K: Key, V: ValueSliceZc, D, F: Fn(&[V::Elem]) -> KvResult<D>>(
+        &self,
+        k: &K,
+        f: F,
+    ) -> KvResult<Option<D>> {
+        k.as_bytes(|k_bytes| {
+            self.0
+                .get(ReadOptions::new(), k_bytes)?
+                .map(|bytes| {
+                    if let Some(layout_verified) =
+                        zerocopy::LayoutVerified::<_, [V::Elem]>::new_slice(
+                            &bytes[V::prefix_len()..],
+                        )
+                    {
+                        f(&layout_verified)
+                    } else {
+                        Err(KvError::DeserError(
+                            "Bytes are invalid length or alignment.".into(),
+                        ))
+                    }
+                })
+                .transpose()
+        })
+    }
+    #[inline(always)]
+    fn delete<K: Key>(&mut self, k: &K) -> KvResult<()> {
+        k.as_bytes(|k_bytes| self.0.delete(WriteOptions::new(), k_bytes))?;
+        Ok(())
+    }
+    #[inline(always)]
+    fn put<K: Key, V: Value>(&mut self, k: &K, value: &V) -> KvResult<()> {
+        value.as_bytes(|value_bytes| {
+            k.as_bytes(|k_bytes| self.0.put(WriteOptions::new(), k_bytes, value_bytes))?;
+            Ok(())
+        })
+    }
+    #[inline(always)]
+    fn write_batch(&mut self, inner_batch: Self::Batch) -> KvResult<()> {
+        self.0.write(WriteOptions::new(), &inner_batch.0)?;
+        Ok(())
+    }
+    #[inline(always)]
+    fn iter<K: Key, V: Value>(&self, range: RangeBytes) -> Self::Iter {
+        LevelDbIter::new(self.0.iter(ReadOptions::new()), range)
+    }
+    #[inline(always)]
+    fn save(&self) -> KvResult<()> {
+        Ok(())
+    }
+}
+
+pub struct LevelDbIter {
+    inner: LevelDbIterator,
+    range_start: Bound<IVec>,
+    range_end: Bound<IVec>,
+    reversed: bool,
+}
+impl LevelDbIter {
+    fn new(inner: LevelDbIterator, range: RangeBytes) -> Self {
+        LevelDbIter {
+            inner,
+            range_start: range.0,
+            range_end: range.1,
+            reversed: false,
+        }
+    }
+}
+impl Debug for LevelDbIter {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("LevelDbIter")
+            .field("inner", &"LevelDbIterator<'db>")
+            .field("range_start", &self.range_start)
+            .field("range_end", &self.range_end)
+            .finish()
+    }
+}
+
+impl Iterator for LevelDbIter {
+    type Item = Result<(LevelDbBytes, LevelDbBytes), DynErr>;
+
+    #[inline(always)]
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            match self
+                .inner
+                .next()
+                .map(|(k, v)| Ok((LevelDbBytes(k), LevelDbBytes(v))))
+            {
+                Some(Ok((key_bytes, value_bytes))) => {
+                    let start_bound_ok = match &self.range_start {
+                        Bound::Included(start_bytes) => key_bytes.as_ref() >= start_bytes.as_ref(),
+                        Bound::Excluded(start_bytes) => key_bytes.as_ref() > start_bytes.as_ref(),
+                        Bound::Unbounded => true,
+                    };
+                    let end_bound_ok = match &self.range_end {
+                        Bound::Included(end_bytes) => key_bytes.as_ref() <= end_bytes.as_ref(),
+                        Bound::Excluded(end_bytes) => key_bytes.as_ref() < end_bytes.as_ref(),
+                        Bound::Unbounded => true,
+                    };
+                    if start_bound_ok {
+                        if end_bound_ok {
+                            break Some(Ok((key_bytes, value_bytes)));
+                        } else if self.reversed {
+                            // The interval has not yet begun.
+                            continue;
+                        } else {
+                            // The range has been fully traversed, the iterator is finished.
+                            break None;
+                        }
+                    } else if end_bound_ok {
+                        if self.reversed {
+                            // The range has been fully traversed, the iterator is finished.
+                            break None;
+                        } else {
+                            // The interval has not yet begun.
+                            continue;
+                        }
+                    } else {
+                        // Empty range, the iterator is finished.
+                        break None;
+                    }
+                }
+                other => break other,
+            }
+        }
+    }
+}
+impl ReversableIterator for LevelDbIter {
+    #[inline(always)]
+    fn reverse(self) -> Self {
+        LevelDbIter {
+            range_start: self.range_start,
+            range_end: self.range_end,
+            reversed: !self.reversed,
+            inner: self.inner.reverse(),
+        }
+    }
+}
+impl BackendIter<LevelDbBytes, LevelDbBytes> for LevelDbIter {}
+
+#[derive(Clone, Debug)]
+/// leveldb configuration
+pub struct LevelDbConf {
+    pub create_if_missing: bool,
+    pub db_path: PathBuf,
+    pub error_if_exists: bool,
+    pub paranoid_checks: bool,
+    pub write_buffer_size: Option<usize>,
+    pub max_open_files: Option<i32>,
+    pub block_size: Option<usize>,
+    pub block_restart_interval: Option<i32>,
+    pub compression: bool,
+    pub cache: Option<usize>,
+}
+
+impl LevelDbConf {
+    pub fn path(db_path: PathBuf) -> Self {
+        Self {
+            db_path,
+            ..Default::default()
+        }
+    }
+}
+
+impl Default for LevelDbConf {
+    fn default() -> Self {
+        LevelDbConf {
+            create_if_missing: true,
+            db_path: PathBuf::default(),
+            error_if_exists: false,
+            paranoid_checks: false,
+            write_buffer_size: None,
+            max_open_files: None,
+            block_size: None,
+            block_restart_interval: None,
+            compression: true,
+            cache: None,
+        }
+    }
+}
+
+impl From<LevelDbConf> for LevelDbOptions {
+    fn from(conf: LevelDbConf) -> Self {
+        LevelDbOptions {
+            create_if_missing: conf.create_if_missing,
+            error_if_exists: conf.error_if_exists,
+            paranoid_checks: conf.paranoid_checks,
+            write_buffer_size: conf.write_buffer_size,
+            max_open_files: conf.max_open_files,
+            block_size: conf.block_size,
+            block_restart_interval: conf.block_restart_interval,
+            compression: if conf.compression {
+                Compression::Snappy
+            } else {
+                Compression::No
+            },
+            cache: conf.cache.map(LevelDbCache::new),
+        }
+    }
+}
diff --git a/tools/kv_typed/src/backend/lmdb.rs b/tools/kv_typed/src/backend/lmdb.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ceb728f23659142347e925f5c253145d864a619c
--- /dev/null
+++ b/tools/kv_typed/src/backend/lmdb.rs
@@ -0,0 +1,416 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! LMDB backend for KV Typed.
+
+use crate::*;
+use lmdb::{traits::CreateCursor as _, LmdbResultExt as _};
+use lmdb_zero as lmdb;
+use std::path::PathBuf;
+
+#[derive(Clone, Copy, Debug)]
+/// Be careful with this backend:
+/// LMDB does not support multiple iterators in the same thread, so you need to make sure that:
+/// 1. Any iterator is dropped before any new call to the `iter()` method.
+/// 2. In an asynchronous context, an async task never yields while it holds an instantiated iterator.
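+///
+/// A minimal sketch of the safe pattern (hypothetical column and ranges, not compiled):
+///
+/// ```ignore
+/// let mut iter = col.iter::<K, V>(range);
+/// let first = iter.next();
+/// drop(iter); // drop the first iterator...
+/// let mut iter2 = col.iter::<K, V>(other_range); // ...before creating a new one
+/// ```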
+pub struct Lmdb;
+
+#[derive(Clone, Debug)]
+pub struct LmdbConf {
+    folder_path: PathBuf,
+    temporary: bool,
+}
+impl Default for LmdbConf {
+    fn default() -> Self {
+        LmdbConf {
+            folder_path: PathBuf::default(),
+            temporary: false,
+        }
+    }
+}
+impl LmdbConf {
+    pub fn folder_path(mut self, folder_path: PathBuf) -> Self {
+        self.folder_path = folder_path;
+        self
+    }
+    pub fn temporary(mut self, temporary: bool) -> Self {
+        self.temporary = temporary;
+        self
+    }
+}
+
+impl Backend for Lmdb {
+    const NAME: &'static str = "lmdb";
+    type Col = LmdbCol;
+    type Conf = LmdbConf;
+
+    fn open(conf: &Self::Conf) -> KvResult<Self> {
+        std::fs::create_dir_all(conf.folder_path.as_path())?;
+        Ok(Lmdb)
+    }
+    fn open_col(&mut self, conf: &Self::Conf, col_name: &str) -> KvResult<Self::Col> {
+        let path: PathBuf = conf.folder_path.join(col_name);
+        let exist = path.as_path().exists();
+        if !exist {
+            std::fs::create_dir(path.as_path())?;
+        }
+        let path_to_remove = if conf.temporary {
+            Some(path.clone())
+        } else {
+            None
+        };
+        let path = path
+            .into_os_string()
+            .into_string()
+            .expect("Invalid DB path");
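+        // Writeable memory map with asynchronous flushes; LMDB's own locking is disabled,
+        // so transactions must be synchronized by the caller.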
+        let mut env_flags = lmdb::open::Flags::empty();
+        env_flags.insert(lmdb::open::WRITEMAP);
+        env_flags.insert(lmdb::open::MAPASYNC);
+        env_flags.insert(lmdb::open::NOLOCK);
+        let col_options = if exist {
+            lmdb::DatabaseOptions::defaults()
+        } else {
+            lmdb::DatabaseOptions::new(lmdb::db::CREATE)
+        };
+        let env =
+            std::sync::Arc::new(unsafe { lmdb::EnvBuilder::new()?.open(&path, env_flags, 0o600)? });
+        let tree = std::sync::Arc::new(lmdb::Database::open(env.clone(), None, &col_options)?);
+        Ok(LmdbCol {
+            inner: LmdbColInner { env, tree },
+            path_to_remove,
+        })
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct LmdbCol {
+    inner: LmdbColInner,
+    path_to_remove: Option<PathBuf>,
+}
+
+impl Drop for LmdbCol {
+    fn drop(&mut self) {
+        if let Some(ref path) = self.path_to_remove {
+            // Remove the temporary column directory together with the LMDB files it contains.
+            let _ = std::fs::remove_dir_all(path);
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+struct LmdbColInner {
+    env: std::sync::Arc<lmdb::Environment>,
+    tree: std::sync::Arc<lmdb::Database<'static>>,
+}
+
+#[derive(Debug, Default)]
+pub struct LmdbBatch {
+    upsert_ops: Vec<(IVec, IVec)>,
+    remove_ops: Vec<IVec>,
+}
+
+impl BackendBatch for LmdbBatch {
+    fn upsert(&mut self, k: &[u8], v: &[u8]) {
+        self.upsert_ops.push((k.into(), v.into()));
+    }
+
+    fn remove(&mut self, k: &[u8]) {
+        self.remove_ops.push(k.into());
+    }
+}
+
+#[derive(Debug)]
+struct LmdbIterAccess {
+    env: std::sync::Arc<lmdb::Environment>,
+    access: lmdb::ConstAccessor<'static>,
+    tree: std::sync::Arc<lmdb::Database<'static>>,
+    tx: lmdb::ReadTransaction<'static>,
+}
+
+#[derive(Debug)]
+pub struct LmdbIter {
+    access: Arc<LmdbIterAccess>,
+    cursor: lmdb::Cursor<'static, 'static>,
+    reversed: bool,
+    started: bool,
+}
+
+impl LmdbIter {
+    fn new(
+        env: std::sync::Arc<lmdb::Environment>,
+        tree: std::sync::Arc<lmdb::Database<'static>>,
+    ) -> Self {
+        let tx = lmdb::ReadTransaction::new(env.clone()).expect("fail to read DB");
+        let tx_static: &'static lmdb::ReadTransaction<'static> =
+            unsafe { std::mem::transmute(&tx) };
+        let access = tx_static.access();
+        let cursor = tx_static
+            .cursor(tree.clone())
+            .expect("fail to create DB cursor");
+        LmdbIter {
+            access: Arc::new(LmdbIterAccess {
+                access,
+                env,
+                tree,
+                tx,
+            }),
+            cursor,
+            reversed: false,
+            started: false,
+        }
+    }
+}
+
+impl Iterator for LmdbIter {
+    type Item = Result<(&'static [u8], &'static [u8]), DynErr>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.reversed {
+            if self.started {
+                match self
+                    .cursor
+                    .prev::<[u8], [u8]>(unsafe {
+                        // # Safety
+                        // Lifetime of accessor is used to track db and lmdb_tx lifetimes: These are already static.
+                        // It's safe because the byte references will be transformed into K and V owned types before
+                        // being exposed to the user API.
+                        std::mem::transmute(&self.access.access)
+                    })
+                    .to_opt()
+                {
+                    Ok(Some((k, v))) => Some(Ok((k, v))),
+                    Ok(None) => None,
+                    Err(e) => Some(Err(e.into())),
+                }
+            } else {
+                self.started = true;
+                match self
+                    .cursor
+                    .last::<[u8], [u8]>(unsafe {
+                        // # Safety
+                        // Lifetime of accessor is used to track db and lmdb_tx lifetimes: These are already static.
+                        // It's safe because the byte references will be transformed into K and V owned types before
+                        // being exposed to the user API.
+                        std::mem::transmute(&self.access.access)
+                    })
+                    .to_opt()
+                {
+                    Ok(Some((k, v))) => Some(Ok((k, v))),
+                    Ok(None) => None,
+                    Err(e) => Some(Err(e.into())),
+                }
+            }
+        } else if self.started {
+            match self
+                .cursor
+                .next::<[u8], [u8]>(unsafe {
+                    // # Safety
+                    // Lifetime of accessor is used to track db and lmdb_tx lifetimes: These are already static.
+                    // It's safe because the byte references will be transformed into K and V owned types before
+                    // being exposed to the user API.
+                    std::mem::transmute(&self.access.access)
+                })
+                .to_opt()
+            {
+                Ok(Some((k, v))) => Some(Ok((k, v))),
+                Ok(None) => None,
+                Err(e) => Some(Err(e.into())),
+            }
+        } else {
+            self.started = true;
+            match self
+                .cursor
+                .first::<[u8], [u8]>(unsafe {
+                    // # Safety
+                    // Lifetime of accessor is used to track db and lmdb_tx lifetimes: These are already static.
+                    // It's safe because the byte references will be transformed into K and V owned types before
+                    // being exposed to the user API.
+                    std::mem::transmute(&self.access.access)
+                })
+                .to_opt()
+            {
+                Ok(Some((k, v))) => Some(Ok((k, v))),
+                Ok(None) => None,
+                Err(e) => Some(Err(e.into())),
+            }
+        }
+    }
+}
+
+impl ReversableIterator for LmdbIter {
+    fn reverse(mut self) -> Self {
+        self.reversed = true;
+        self
+    }
+}
+
+impl BackendIter<&'static [u8], &'static [u8]> for LmdbIter {}
+
+impl BackendCol for LmdbCol {
+    type Batch = LmdbBatch;
+    type KeyBytes = &'static [u8];
+    type ValueBytes = &'static [u8];
+    type Iter = LmdbIter;
+
+    #[inline(always)]
+    fn contains_key<K: Key>(&self, k: &K) -> KvResult<bool> {
+        let tx = lmdb::ReadTransaction::new(self.inner.tree.env())?;
+        let access = tx.access();
+        k.as_bytes(|k_bytes| {
+            Ok(access
+                .get::<_, ()>(&self.inner.tree, k_bytes)
+                .to_opt()?
+                .is_some())
+        })
+    }
+
+    fn get<K: Key, V: Value>(&self, k: &K) -> KvResult<Option<V>> {
+        let tx = lmdb::ReadTransaction::new(self.inner.tree.env())?;
+        let access = tx.access();
+        k.as_bytes(|k_bytes| {
+            access
+                .get(&self.inner.tree, k_bytes)
+                .to_opt()?
+                .map(|bytes| V::from_bytes(&bytes).map_err(|e| KvError::DeserError(e.into())))
+                .transpose()
+        })
+    }
+
+    fn get_ref<K: Key, V: ValueZc, D, F: Fn(&V::Ref) -> KvResult<D>>(
+        &self,
+        k: &K,
+        f: F,
+    ) -> KvResult<Option<D>> {
+        k.as_bytes(|k_bytes| {
+            let tx = lmdb::ReadTransaction::new(self.inner.tree.env())?;
+            let access = tx.access();
+            access
+                .get::<_, [u8]>(&self.inner.tree, k_bytes)
+                .to_opt()?
+                .map(|bytes| {
+                    if let Some(layout_verified) = zerocopy::LayoutVerified::<_, V::Ref>::new(bytes)
+                    {
+                        f(&layout_verified)
+                    } else {
+                        Err(KvError::DeserError(
+                            "Bytes are invalid length or alignment.".into(),
+                        ))
+                    }
+                })
+                .transpose()
+        })
+    }
+
+    fn get_ref_slice<K: Key, V: ValueSliceZc, D, F: Fn(&[V::Elem]) -> KvResult<D>>(
+        &self,
+        k: &K,
+        f: F,
+    ) -> KvResult<Option<D>> {
+        k.as_bytes(|k_bytes| {
+            let tx = lmdb::ReadTransaction::new(self.inner.tree.env())?;
+            let access = tx.access();
+            access
+                .get::<_, [u8]>(&self.inner.tree, k_bytes)
+                .to_opt()?
+                .map(|bytes| {
+                    if let Some(layout_verified) =
+                        zerocopy::LayoutVerified::<_, [V::Elem]>::new_slice(
+                            &bytes[V::prefix_len()..],
+                        )
+                    {
+                        f(&layout_verified)
+                    } else {
+                        Err(KvError::DeserError(
+                            "Bytes are invalid length or alignment.".into(),
+                        ))
+                    }
+                })
+                .transpose()
+        })
+    }
+
+    fn clear(&mut self) -> KvResult<()> {
+        let tx = lmdb::WriteTransaction::new(self.inner.tree.env())?;
+        {
+            let mut access = tx.access();
+            access.clear_db(&self.inner.tree)?;
+        }
+        tx.commit()?;
+        Ok(())
+    }
+
+    fn count(&self) -> KvResult<usize> {
+        let tx = lmdb::ReadTransaction::new(self.inner.tree.env())?;
+        Ok(tx.db_stat(&self.inner.tree)?.entries)
+    }
+
+    fn iter<K: Key, V: Value>(&self, _range: RangeBytes) -> Self::Iter {
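+        // NOTE: the requested range is currently ignored, the iterator traverses the whole column.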
+        LmdbIter::new(self.inner.env.clone(), self.inner.tree.clone())
+    }
+
+    fn put<K: Key, V: Value>(&mut self, k: &K, value: &V) -> KvResult<()> {
+        value.as_bytes(|v_bytes| {
+            let tx = lmdb::WriteTransaction::new(self.inner.tree.env())?;
+            k.as_bytes(|k_bytes| {
+                let mut access = tx.access();
+                access.put(
+                    &self.inner.tree,
+                    k_bytes,
+                    v_bytes,
+                    lmdb::put::Flags::empty(),
+                )
+            })?;
+            tx.commit()?;
+            Ok(())
+        })
+    }
+
+    fn delete<K: Key>(&mut self, k: &K) -> KvResult<()> {
+        let tx = lmdb::WriteTransaction::new(self.inner.tree.env())?;
+        k.as_bytes(|k_bytes| {
+            let mut access = tx.access();
+            access.del_key(&self.inner.tree, k_bytes).to_opt()
+        })?;
+        tx.commit()?;
+        Ok(())
+    }
+
+    fn new_batch() -> Self::Batch {
+        LmdbBatch::default()
+    }
+
+    fn write_batch(&mut self, inner_batch: Self::Batch) -> KvResult<()> {
+        let tx = lmdb::WriteTransaction::new(self.inner.tree.env())?;
+        {
+            let mut access = tx.access();
+            for (k, v) in inner_batch.upsert_ops {
+                access.put(
+                    &self.inner.tree,
+                    k.as_ref(),
+                    v.as_ref(),
+                    lmdb::put::Flags::empty(),
+                )?;
+            }
+            for k in inner_batch.remove_ops {
+                access.del_key(&self.inner.tree, k.as_ref()).to_opt()?;
+            }
+        }
+        tx.commit()?;
+        Ok(())
+    }
+
+    fn save(&self) -> KvResult<()> {
+        Ok(self.inner.tree.env().sync(true)?)
+    }
+}
diff --git a/tools/kv_typed/src/backend/memory.rs b/tools/kv_typed/src/backend/memory.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f9c41870f73524584a6193d0c91e1ffa60490cc1
--- /dev/null
+++ b/tools/kv_typed/src/backend/memory.rs
@@ -0,0 +1,284 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Memory backend for KV Typed.
+
+use crate::*;
+use std::collections::BTreeMap;
+//use uninit::extension_traits::VecCapacity as _;
+
+#[derive(Clone, Copy, Debug)]
+pub struct Mem;
+
+#[derive(Clone, Debug, Default)]
+pub struct MemConf {
+    folder_path: Option<std::path::PathBuf>,
+}
+
+type KeyBytes = IVec;
+type ValueBytes = IVec;
+type Tree = BTreeMap<KeyBytes, ValueBytes>;
+
+impl Backend for Mem {
+    const NAME: &'static str = "mem";
+    type Col = MemCol;
+    type Conf = MemConf;
+
+    fn open(_conf: &Self::Conf) -> KvResult<Self> {
+        Ok(Mem)
+    }
+    fn open_col(&mut self, _conf: &Self::Conf, _col_name: &str) -> KvResult<Self::Col> {
+        /*if let Some(ref folder_path) = conf.folder_path {
+            MemCol::from_file(folder_path.join(col_name))
+        } else {*/
+        Ok(MemCol {
+            path: None,
+            tree: BTreeMap::new(),
+        })
+        //}
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct MemBatch {
+    upsert_ops: Vec<(IVec, IVec)>,
+    remove_ops: Vec<IVec>,
+}
+
+impl BackendBatch for MemBatch {
+    fn upsert(&mut self, k: &[u8], v: &[u8]) {
+        self.upsert_ops.push((k.into(), v.into()));
+    }
+
+    fn remove(&mut self, k: &[u8]) {
+        self.remove_ops.push(k.into());
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct MemCol {
+    path: Option<std::path::PathBuf>,
+    tree: Tree,
+}
+
+impl BackendCol for MemCol {
+    type Batch = MemBatch;
+    type KeyBytes = KeyBytes;
+    type ValueBytes = ValueBytes;
+    type Iter = MemIter;
+
+    #[inline(always)]
+    fn new_batch() -> Self::Batch {
+        MemBatch::default()
+    }
+    #[inline(always)]
+    fn clear(&mut self) -> KvResult<()> {
+        self.tree.clear();
+        Ok(())
+    }
+    #[inline(always)]
+    fn count(&self) -> KvResult<usize> {
+        Ok(self.tree.len())
+    }
+    #[inline(always)]
+    fn contains_key<K: Key>(&self, k: &K) -> KvResult<bool> {
+        k.as_bytes(|k_bytes| Ok(self.tree.contains_key(k_bytes)))
+    }
+    #[inline(always)]
+    fn get<K: Key, V: Value>(&self, k: &K) -> KvResult<Option<V>> {
+        k.as_bytes(|k_bytes| {
+            self.tree
+                .get(k_bytes)
+                .map(|bytes| V::from_bytes(&bytes).map_err(|e| KvError::DeserError(e.into())))
+                .transpose()
+        })
+    }
+    #[inline(always)]
+    fn get_ref<K: Key, V: ValueZc, D, F: Fn(&V::Ref) -> KvResult<D>>(
+        &self,
+        k: &K,
+        f: F,
+    ) -> KvResult<Option<D>> {
+        k.as_bytes(|k_bytes| {
+            self.tree
+                .get(k_bytes)
+                .map(|bytes| {
+                    if let Some(layout_verified) =
+                        zerocopy::LayoutVerified::<_, V::Ref>::new(bytes.as_ref())
+                    {
+                        f(&layout_verified)
+                    } else {
+                        Err(KvError::DeserError(
+                            "Bytes are invalid length or alignment.".into(),
+                        ))
+                    }
+                })
+                .transpose()
+        })
+    }
+    #[inline(always)]
+    fn get_ref_slice<K: Key, V: ValueSliceZc, D, F: Fn(&[V::Elem]) -> KvResult<D>>(
+        &self,
+        k: &K,
+        f: F,
+    ) -> KvResult<Option<D>> {
+        k.as_bytes(|k_bytes| {
+            self.tree
+                .get(k_bytes)
+                .map(|bytes| {
+                    if let Some(layout_verified) =
+                        zerocopy::LayoutVerified::<_, [V::Elem]>::new_slice(
+                            &bytes[V::prefix_len()..],
+                        )
+                    {
+                        f(&layout_verified)
+                    } else {
+                        Err(KvError::DeserError(
+                            "Bytes are invalid length or alignment.".into(),
+                        ))
+                    }
+                })
+                .transpose()
+        })
+    }
+    #[inline(always)]
+    fn delete<K: Key>(&mut self, k: &K) -> KvResult<()> {
+        k.as_bytes(|k_bytes| self.tree.remove(k_bytes));
+        Ok(())
+    }
+    #[inline(always)]
+    fn put<K: Key, V: Value>(&mut self, k: &K, value: &V) -> KvResult<()> {
+        value.as_bytes(|value_bytes| {
+            k.as_bytes(|k_bytes| {
+                self.tree.insert(k_bytes.into(), value_bytes.into());
+            });
+            Ok(())
+        })
+    }
+    #[inline(always)]
+    fn write_batch(&mut self, inner_batch: Self::Batch) -> KvResult<()> {
+        for (k, v) in inner_batch.upsert_ops {
+            self.tree.insert(k, v);
+        }
+        for k in inner_batch.remove_ops {
+            self.tree.remove(&k);
+        }
+        Ok(())
+    }
+    #[inline(always)]
+    fn iter<K: Key, V: Value>(&self, range: RangeBytes) -> Self::Iter {
+        MemIter::new(unsafe {
+            // # Safety
+            // In the front API, the iterator is only given to a closure executed inside a `ColRo`
+            // method, which ensures that the borrowed tree stays alive for the iterator's lifetime.
+            std::mem::transmute(self.tree.range(range))
+        })
+    }
+    #[inline(always)]
+    fn save(&self) -> KvResult<()> {
+        /*if let Some(ref file_path) = self.path {
+            let bytes = Self::tree_to_bytes(&self.tree);
+
+            let mut file =
+                std::fs::File::create(file_path).map_err(|e| KvError::BackendError(e.into()))?;
+            use std::io::Write as _;
+            file.write_all(&bytes[..])
+                .map_err(|e| KvError::BackendError(e.into()))?;
+        }*/
+
+        Ok(())
+    }
+}
+
+pub struct MemIter {
+    iter: std::collections::btree_map::Range<'static, KeyBytes, ValueBytes>,
+    reversed: bool,
+}
+
+impl MemIter {
+    fn new(
+        tree_iter: std::collections::btree_map::Range<'static, KeyBytes, ValueBytes>,
+    ) -> MemIter {
+        MemIter {
+            iter: tree_iter,
+            reversed: false,
+        }
+    }
+}
+
+impl Debug for MemIter {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("MemIter").field("0", &"???").finish()
+    }
+}
+impl Iterator for MemIter {
+    type Item = Result<(KeyBytes, ValueBytes), DynErr>;
+
+    #[inline(always)]
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.reversed {
+            self.iter.next_back()
+        } else {
+            self.iter.next()
+        }
+        .map(|(k, v)| Ok((k.to_owned(), v.to_owned())))
+    }
+}
+
+impl ReversableIterator for MemIter {
+    #[inline(always)]
+    fn reverse(mut self) -> Self {
+        self.reversed = !self.reversed;
+        self
+    }
+}
+
+impl BackendIter<IVec, IVec> for MemIter {}
+
+#[cfg(test)]
+mod tests {
+    /*use super::*;
+
+    #[test]
+    fn test_save() -> KvResult<()> {
+        let mut tree = BTreeMap::new();
+
+        let k1 = IVec::from(&[1, 2, 3]);
+        let v1 = IVec::from(&[1, 2, 3, 4, 5]);
+        let k2 = IVec::from(&[1, 2]);
+        let v2 = IVec::from(&[]);
+        let k3 = IVec::from(&[1, 2, 3, 4, 5, 6, 7]);
+        let v3 = IVec::from(&[1, 2, 3, 4, 5, 6]);
+        let k4 = IVec::from(&[]);
+        let v4 = IVec::from(&[1, 2, 3, 4, 5, 6, 7]);
+
+        tree.insert(k1.clone(), v1.clone());
+        tree.insert(k2.clone(), v2.clone());
+        tree.insert(k3.clone(), v3.clone());
+        tree.insert(k4.clone(), v4.clone());
+
+        let bytes = MemCol::tree_to_bytes(&tree);
+
+        let tree2 = MemCol::tree_from_bytes(&bytes)?;
+
+        assert_eq!(tree2.len(), 4);
+        assert_eq!(tree2.get(&k1), Some(&v1));
+        assert_eq!(tree2.get(&k2), Some(&v2));
+        assert_eq!(tree2.get(&k3), Some(&v3));
+        assert_eq!(tree2.get(&k4), Some(&v4));
+
+        Ok(())
+    }*/
+}
diff --git a/tools/kv_typed/src/backend/memory_singleton.rs b/tools/kv_typed/src/backend/memory_singleton.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5f49a8cf3eb6ba4d6afce5618fd8fe59eb4f16d7
--- /dev/null
+++ b/tools/kv_typed/src/backend/memory_singleton.rs
@@ -0,0 +1,186 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! In-memory singleton backend for KV Typed.
+
+use crate::*;
+
+#[derive(Clone, Copy, Debug)]
+pub struct MemSingleton;
+
+#[derive(Clone, Copy, Debug, Default)]
+pub struct MemSingletonConf {
+    phantom: PhantomData<()>,
+}
+
+type KeyBytes = IVec;
+type ValueBytes = IVec;
+
+impl Backend for MemSingleton {
+    const NAME: &'static str = "mem_singleton";
+    type Col = MemCol;
+    type Conf = MemSingletonConf;
+
+    fn open(_conf: &Self::Conf) -> KvResult<Self> {
+        Ok(MemSingleton)
+    }
+    fn open_col(&mut self, _conf: &Self::Conf, _col_name: &str) -> KvResult<Self::Col> {
+        Ok(MemCol(None))
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct MemBatch(Option<IVec>);
+
+impl BackendBatch for MemBatch {
+    fn upsert(&mut self, _k: &[u8], v: &[u8]) {
+        self.0 = Some(v.into());
+    }
+
+    fn remove(&mut self, _k: &[u8]) {
+        self.0 = None;
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct MemCol(Option<ValueBytes>);
+
+impl BackendCol for MemCol {
+    type Batch = MemBatch;
+    type KeyBytes = KeyBytes;
+    type ValueBytes = ValueBytes;
+    type Iter = MemIter;
+
+    #[inline(always)]
+    fn new_batch() -> Self::Batch {
+        MemBatch::default()
+    }
+    #[inline(always)]
+    fn clear(&mut self) -> KvResult<()> {
+        self.0 = None;
+        Ok(())
+    }
+    #[inline(always)]
+    fn count(&self) -> KvResult<usize> {
+        if self.0.is_some() {
+            Ok(1)
+        } else {
+            Ok(0)
+        }
+    }
+    #[inline(always)]
+    fn contains_key<K: Key>(&self, _k: &K) -> KvResult<bool> {
+        Ok(self.0.is_some())
+    }
+    #[inline(always)]
+    fn get<K: Key, V: Value>(&self, _k: &K) -> KvResult<Option<V>> {
+        self.0
+            .as_ref()
+            .map(|bytes| V::from_bytes(bytes).map_err(|e| KvError::DeserError(e.into())))
+            .transpose()
+    }
+    #[inline(always)]
+    fn get_ref<K: Key, V: ValueZc, D, F: Fn(&V::Ref) -> KvResult<D>>(
+        &self,
+        _k: &K,
+        f: F,
+    ) -> KvResult<Option<D>> {
+        self.0
+            .as_ref()
+            .map(|bytes| {
+                if let Some(layout_verified) =
+                    zerocopy::LayoutVerified::<_, V::Ref>::new(bytes.as_ref())
+                {
+                    f(&layout_verified)
+                } else {
+                    Err(KvError::DeserError(
+                        "Bytes are invalid length or alignment.".into(),
+                    ))
+                }
+            })
+            .transpose()
+    }
+    #[inline(always)]
+    fn get_ref_slice<K: Key, V: ValueSliceZc, D, F: Fn(&[V::Elem]) -> KvResult<D>>(
+        &self,
+        _k: &K,
+        f: F,
+    ) -> KvResult<Option<D>> {
+        self.0
+            .as_ref()
+            .map(|bytes| {
+                if let Some(layout_verified) =
+                    zerocopy::LayoutVerified::<_, [V::Elem]>::new_slice(&bytes[V::prefix_len()..])
+                {
+                    f(&layout_verified)
+                } else {
+                    Err(KvError::DeserError(
+                        "Bytes are invalid length or alignment.".into(),
+                    ))
+                }
+            })
+            .transpose()
+    }
+    #[inline(always)]
+    fn delete<K: Key>(&mut self, _k: &K) -> KvResult<()> {
+        self.0 = None;
+        Ok(())
+    }
+    #[inline(always)]
+    fn put<K: Key, V: Value>(&mut self, _k: &K, value: &V) -> KvResult<()> {
+        value.as_bytes(|value_bytes| {
+            self.0 = Some(value_bytes.into());
+            Ok(())
+        })
+    }
+    #[inline(always)]
+    fn write_batch(&mut self, inner_batch: Self::Batch) -> KvResult<()> {
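+        // The singleton value is replaced by the last operation recorded in the batch:
+        // `Some(value)` for an upsert, `None` for a removal (or an empty batch).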
+        self.0 = inner_batch.0;
+        Ok(())
+    }
+    #[inline(always)]
+    fn iter<K: Key, V: Value>(&self, _: RangeBytes) -> Self::Iter {
+        MemIter(self.0.clone())
+    }
+    #[inline(always)]
+    fn save(&self) -> KvResult<()> {
+        Ok(())
+    }
+}
+
+pub struct MemIter(Option<ValueBytes>);
+
+impl Debug for MemIter {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("MemIter").field("0", &"???").finish()
+    }
+}
+impl Iterator for MemIter {
+    type Item = Result<(KeyBytes, ValueBytes), DynErr>;
+
+    #[inline(always)]
+    fn next(&mut self) -> Option<Self::Item> {
+        self.0.take().map(|v| Ok((KeyBytes::default(), v)))
+    }
+}
+
+impl ReversableIterator for MemIter {
+    #[inline(always)]
+    fn reverse(self) -> Self {
+        self
+    }
+}
+
+impl BackendIter<IVec, IVec> for MemIter {}
diff --git a/tools/kv_typed/src/backend/mock.rs b/tools/kv_typed/src/backend/mock.rs
new file mode 100644
index 0000000000000000000000000000000000000000..0d479a398f5caa7256334402b360790e937765b2
--- /dev/null
+++ b/tools/kv_typed/src/backend/mock.rs
@@ -0,0 +1,85 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed mock backend
+
+use super::MockBackendBatch;
+use crate::*;
+
+mockall::mock! {
+    pub BackendIter {}
+    trait Iterator {
+        type Item = Result<(IVec, IVec), DynErr>;
+
+        fn next(&mut self) -> Option<<Self as Iterator>::Item>;
+    }
+    trait ReversableIterator {
+        fn reverse(self) -> Self;
+    }
+}
+impl BackendIter<IVec, IVec> for MockBackendIter {}
+
+mockall::mock! {
+    pub BackendCol {}
+    trait Clone {
+        fn clone(&self) -> Self;
+    }
+    trait BackendCol {
+        type Batch = MockBackendBatch;
+        type KeyBytes = IVec;
+        type ValueBytes = IVec;
+        type Iter = MockBackendIter;
+
+        fn get<K: Key, V: Value>(&self, k: &K) -> KvResult<Option<V>>;
+        fn get_ref<K: Key, V: ValueZc, D, F: Fn(&V::Ref) -> KvResult<D>>(
+            &self,
+            k: &K,
+            f: F,
+        ) -> KvResult<Option<D>>;
+        fn get_ref_slice<K: Key, V: ValueSliceZc, D, F: Fn(&[V::Elem]) -> KvResult<D>>(
+            &self,
+            k: &K,
+            f: F,
+        ) -> KvResult<Option<D>>;
+        fn clear(&self) -> KvResult<()>;
+        fn count(&self) -> KvResult<usize>;
+        fn iter<K: Key, V: Value>(&self, range: RangeBytes) -> MockBackendIter;
+        fn put<K: Key, V: Value>(&self, k: &K, value: &V) -> KvResult<()>;
+        fn delete<K: Key>(&self, k: &K) -> KvResult<()>;
+        fn new_batch() -> MockBackendBatch;
+        fn write_batch(&self, inner_batch: MockBackendBatch) -> KvResult<()>;
+        fn save(&self) -> KvResult<()>;
+    }
+}
+impl Debug for MockBackendCol {
+    fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        unimplemented!()
+    }
+}
+
+mockall::mock! {
+    pub Backend {}
+    trait Clone {
+        fn clone(&self) -> Self;
+    }
+    trait Backend: 'static + Clone + Sized {
+        const NAME: &'static str = "mock";
+        type Col = MockBackendCol;
+        type Conf = ();
+
+        fn open(conf: &()) -> KvResult<Self>;
+        fn open_col(&mut self, conf: &(), col_name: &str) -> KvResult<MockBackendCol>;
+    }
+}
diff --git a/tools/kv_typed/src/backend/sled.rs b/tools/kv_typed/src/backend/sled.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3f97ef6de942d9010d7b20f663c77061bd91f899
--- /dev/null
+++ b/tools/kv_typed/src/backend/sled.rs
@@ -0,0 +1,205 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Sled backend for KV Typed.
+
+pub use sled::Config;
+
+use crate::*;
+
+#[derive(Clone, Debug)]
+pub struct Sled {
+    db: sled::Db,
+    trees: Vec<sled::Tree>,
+}
+
+impl Backend for Sled {
+    const NAME: &'static str = "sled";
+    type Col = SledCol;
+    type Conf = Config;
+
+    fn open(conf: &Self::Conf) -> KvResult<Self> {
+        Ok(Sled {
+            db: conf.open()?,
+            trees: Vec::new(),
+        })
+    }
+    fn open_col(&mut self, _conf: &Self::Conf, col_name: &str) -> KvResult<Self::Col> {
+        let tree = self.db.open_tree(col_name)?;
+        self.trees.push(tree.clone());
+        Ok(SledCol(tree))
+    }
+}
+
+impl BackendBatch for sled::Batch {
+    fn upsert(&mut self, k: &[u8], v: &[u8]) {
+        self.insert(k, v)
+    }
+
+    fn remove(&mut self, k: &[u8]) {
+        self.remove(k)
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct SledCol(sled::Tree);
+
+impl BackendCol for SledCol {
+    type Batch = sled::Batch;
+    type KeyBytes = IVec;
+    type ValueBytes = IVec;
+    type Iter = SledIter;
+
+    #[inline(always)]
+    fn new_batch() -> Self::Batch {
+        sled::Batch::default()
+    }
+    #[inline(always)]
+    fn clear(&mut self) -> KvResult<()> {
+        self.0.clear()?;
+        Ok(())
+    }
+    #[inline(always)]
+    fn count(&self) -> KvResult<usize> {
+        Ok(self.0.len())
+    }
+    #[inline(always)]
+    fn contains_key<K: Key>(&self, k: &K) -> KvResult<bool> {
+        k.as_bytes(|k_bytes| Ok(self.0.contains_key(k_bytes)?))
+    }
+    #[inline(always)]
+    fn get<K: Key, V: Value>(&self, k: &K) -> KvResult<Option<V>> {
+        k.as_bytes(|k_bytes| {
+            self.0
+                .get(k_bytes)?
+                .map(|bytes| V::from_bytes(&bytes).map_err(|e| KvError::DeserError(e.into())))
+                .transpose()
+        })
+    }
+    #[inline(always)]
+    fn get_ref<K: Key, V: ValueZc, D, F: Fn(&V::Ref) -> KvResult<D>>(
+        &self,
+        k: &K,
+        f: F,
+    ) -> KvResult<Option<D>> {
+        k.as_bytes(|k_bytes| {
+            self.0
+                .get(k_bytes)?
+                .map(|bytes| {
+                    if let Some(layout_verified) =
+                        zerocopy::LayoutVerified::<_, V::Ref>::new(bytes.as_ref())
+                    {
+                        f(&layout_verified)
+                    } else {
+                        Err(KvError::DeserError(
+                            "Bytes are invalid length or alignment.".into(),
+                        ))
+                    }
+                })
+                .transpose()
+        })
+    }
+    #[inline(always)]
+    fn get_ref_slice<K: Key, V: ValueSliceZc, D, F: Fn(&[V::Elem]) -> KvResult<D>>(
+        &self,
+        k: &K,
+        f: F,
+    ) -> KvResult<Option<D>> {
+        k.as_bytes(|k_bytes| {
+            self.0
+                .get(k_bytes)?
+                .map(|bytes| {
+                    if let Some(layout_verified) =
+                        zerocopy::LayoutVerified::<_, [V::Elem]>::new_slice(
+                            &bytes[V::prefix_len()..],
+                        )
+                    {
+                        f(&layout_verified)
+                    } else {
+                        Err(KvError::DeserError(
+                            "Bytes are invalid length or alignment.".into(),
+                        ))
+                    }
+                })
+                .transpose()
+        })
+    }
+    #[inline(always)]
+    fn delete<K: Key>(&mut self, k: &K) -> KvResult<()> {
+        k.as_bytes(|k_bytes| self.0.remove(k_bytes))?;
+        Ok(())
+    }
+    #[inline(always)]
+    fn put<K: Key, V: Value>(&mut self, k: &K, value: &V) -> KvResult<()> {
+        value.as_bytes(|value_bytes| {
+            k.as_bytes(|k_bytes| self.0.insert(k_bytes, value_bytes))?;
+            Ok(())
+        })
+    }
+    #[inline(always)]
+    fn write_batch(&mut self, inner_batch: Self::Batch) -> KvResult<()> {
+        self.0.apply_batch(inner_batch)?;
+        Ok(())
+    }
+    #[inline(always)]
+    fn iter<K: Key, V: Value>(&self, range: RangeBytes) -> Self::Iter {
+        SledIter {
+            iter: self.0.range(range),
+            reversed: false,
+        }
+    }
+    #[inline(always)]
+    fn save(&self) -> KvResult<()> {
+        self.0.flush()?;
+        Ok(())
+    }
+}
+
+pub struct SledIter {
+    iter: sled::Iter,
+    reversed: bool,
+}
+
+impl Debug for SledIter {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("SledIter")
+            .field("0", &"sled::Iter")
+            .finish()
+    }
+}
+impl Iterator for SledIter {
+    type Item = Result<(IVec, IVec), DynErr>;
+
+    #[inline(always)]
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.reversed {
+            self.iter.next_back()
+        } else {
+            self.iter.next()
+        }
+        .map(|res| res.map_err(Box::new).map_err(Into::into))
+    }
+}
+impl ReversableIterator for SledIter {
+    #[inline(always)]
+    fn reverse(self) -> Self {
+        SledIter {
+            iter: self.iter,
+            reversed: !self.reversed,
+        }
+    }
+}
+
+impl BackendIter<IVec, IVec> for SledIter {}
diff --git a/tools/kv_typed/src/batch.rs b/tools/kv_typed/src/batch.rs
new file mode 100644
index 0000000000000000000000000000000000000000..51d120ea6a96597c7af36f9e5436e6360a8fbb9f
--- /dev/null
+++ b/tools/kv_typed/src/batch.rs
@@ -0,0 +1,106 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+use std::collections::{BTreeMap, HashMap, HashSet};
+
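+/// Typed write batch for a collection.
+///
+/// `tree` keeps the serialized operations in key order for the backend (`None` marks a deletion),
+/// while `upsert_ops` and `delete_ops` keep the typed operations used to build subscriber events.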
+#[derive(Debug)]
+pub struct Batch<BC: BackendCol, C: DbCollectionRw> {
+    phantom: PhantomData<BC>,
+    pub(crate) tree: BTreeMap<IVec, Option<IVec>>,
+    upsert_ops: HashMap<C::K, C::V>,
+    delete_ops: HashSet<C::K>,
+}
+
+#[derive(Debug, PartialEq)]
+pub enum BatchGet<'v, V: Value> {
+    None,
+    Deleted,
+    Updated(&'v V),
+}
+
+impl<BC: BackendCol, C: DbCollectionRw> Default for Batch<BC, C> {
+    fn default() -> Self {
+        Batch {
+            phantom: PhantomData,
+            tree: BTreeMap::default(),
+            upsert_ops: HashMap::default(),
+            delete_ops: HashSet::default(),
+        }
+    }
+}
+
+impl<BC: BackendCol, C: DbCollectionRw> Batch<BC, C> {
+    pub fn clear(&mut self) {
+        self.tree.clear();
+        self.upsert_ops.clear();
+        self.delete_ops.clear();
+    }
+    pub fn get(&self, k: &C::K) -> BatchGet<C::V> {
+        if self.delete_ops.contains(k) {
+            BatchGet::Deleted
+        } else if let Some(v) = self.upsert_ops.get(k) {
+            BatchGet::Updated(v)
+        } else {
+            BatchGet::None
+        }
+    }
+    pub fn upsert(&mut self, k: C::K, v: C::V) {
+        let _ = k.as_bytes(|k_bytes| {
+            v.as_bytes(|v_bytes| {
+                self.tree
+                    .insert(IVec::from(k_bytes), Some(IVec::from(v_bytes)));
+            })
+        });
+        self.upsert_ops.insert(k, v);
+    }
+    pub fn remove(&mut self, k: C::K) {
+        let _ = k.as_bytes(|k_bytes| {
+            self.tree.insert(IVec::from(k_bytes), None);
+        });
+        self.upsert_ops.remove(&k);
+        self.delete_ops.insert(k);
+    }
+    #[doc(hidden)]
+    pub fn into_backend_batch(self) -> BC::Batch {
+        let mut backend_batch = BC::Batch::default();
+        for (k_bytes, v_bytes_opt) in self.tree {
+            if let Some(v_bytes) = v_bytes_opt {
+                backend_batch.upsert(k_bytes.as_ref(), v_bytes.as_ref());
+            } else {
+                backend_batch.remove(k_bytes.as_ref());
+            }
+        }
+        backend_batch
+    }
+    #[doc(hidden)]
+    pub fn into_backend_batch_and_events(self) -> (BC::Batch, SmallVec<[C::Event; 4]>) {
+        let mut backend_batch = BC::Batch::default();
+        for (k_bytes, v_bytes_opt) in self.tree {
+            if let Some(v_bytes) = v_bytes_opt {
+                backend_batch.upsert(k_bytes.as_ref(), v_bytes.as_ref());
+            } else {
+                backend_batch.remove(k_bytes.as_ref());
+            }
+        }
+        let mut events: SmallVec<[C::Event; 4]> = self
+            .upsert_ops
+            .into_iter()
+            .map(|(k, v)| C::Event::upsert(k, v))
+            .collect();
+        events.extend(self.delete_ops.into_iter().map(C::Event::remove));
+        (backend_batch, events)
+    }
+}
diff --git a/tools/kv_typed/src/bytes.rs b/tools/kv_typed/src/bytes.rs
new file mode 100644
index 0000000000000000000000000000000000000000..54a4d29dd0bc9285ee61db86ab2f55e847bcc1d6
--- /dev/null
+++ b/tools/kv_typed/src/bytes.rs
@@ -0,0 +1,62 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed bytes
+
+use crate::*;
+
+pub trait KeyBytes: AsRef<[u8]> + Debug + Ord {}
+impl<T> KeyBytes for T where T: AsRef<[u8]> + Debug + Ord {}
+pub trait ValueBytes: AsRef<[u8]> + Debug {}
+impl<T> ValueBytes for T where T: AsRef<[u8]> + Debug {}
+
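+/// Borrowed-or-owned key bytes (a minimal `Cow`-like enum over raw key bytes).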
+#[derive(Debug, Eq, PartialEq)]
+pub enum CowKB<'a, B: KeyBytes> {
+    B(&'a [u8]),
+    O(B),
+}
+impl<'a, B: KeyBytes> AsRef<[u8]> for CowKB<'a, B> {
+    fn as_ref(&self) -> &[u8] {
+        match self {
+            CowKB::B(b_ref) => b_ref,
+            CowKB::O(b) => b.as_ref(),
+        }
+    }
+}
+
+impl<'a, B: KeyBytes> PartialOrd for CowKB<'a, B> {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        self.as_ref().partial_cmp(other.as_ref())
+    }
+}
+impl<'a, B: KeyBytes> Ord for CowKB<'a, B> {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.as_ref().cmp(other.as_ref())
+    }
+}
+
+#[derive(Debug)]
+pub enum CowVB<'a, B: ValueBytes> {
+    B(&'a [u8]),
+    O(B),
+}
+impl<'a, B: ValueBytes> AsRef<[u8]> for CowVB<'a, B> {
+    fn as_ref(&self) -> &[u8] {
+        match self {
+            CowVB::B(b_ref) => b_ref,
+            CowVB::O(b) => b.as_ref(),
+        }
+    }
+}
diff --git a/tools/kv_typed/src/collection_inner.rs b/tools/kv_typed/src/collection_inner.rs
new file mode 100644
index 0000000000000000000000000000000000000000..452cc3f7904d983d1df3c6db8fe1764f9a0d6184
--- /dev/null
+++ b/tools/kv_typed/src/collection_inner.rs
@@ -0,0 +1,43 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+#[derive(Debug)]
+pub struct ColInner<BC: BackendCol, E: EventTrait> {
+    pub(crate) backend_col: BC,
+    subscribers: ColSubscribers<E>,
+}
+
+impl<BC: BackendCol, E: EventTrait> ColInner<BC, E> {
+    pub(crate) fn new(backend_col: BC) -> (Self, SubscriptionsSender<E>) {
+        let subscribers = ColSubscribers::<E>::default();
+        let subscription_sender = subscribers.get_subscription_sender();
+
+        (
+            ColInner {
+                backend_col,
+                subscribers,
+            },
+            subscription_sender,
+        )
+    }
+    pub(crate) fn notify_subscribers(&mut self, events: Events<E>) {
+        // Add new subscribers, notify all subscribers, then prune dead subscribers.
+        self.subscribers.add_new_subscribers();
+        let died_subscribers = self.subscribers.notify_subscribers(Arc::new(events));
+        self.subscribers.prune_subscribers(died_subscribers);
+    }
+}
diff --git a/tools/kv_typed/src/collection_ro.rs b/tools/kv_typed/src/collection_ro.rs
new file mode 100644
index 0000000000000000000000000000000000000000..1ab5360100f86ddbb25da8e20c57ba886c42f735
--- /dev/null
+++ b/tools/kv_typed/src/collection_ro.rs
@@ -0,0 +1,267 @@
+use crate::*;
+
+pub trait DbCollectionRo: Sized {
+    type BackendCol: BackendCol;
+    type K: Key;
+    type V: Value;
+    type Event: EventTrait<K = Self::K, V = Self::V>;
+
+    fn contains_key(&self, k: &Self::K) -> KvResult<bool>;
+    fn count(&self) -> KvResult<usize>;
+    fn get(&self, k: &Self::K) -> KvResult<Option<Self::V>>;
+    /// Don't worry about the complex iterator type: use it like an `impl Iterator<Item = KvResult<(K, V)>>`.
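+    ///
+    /// A minimal sketch (hypothetical column and types, not compiled):
+    ///
+    /// ```ignore
+    /// // Collect every entry of the column into a Vec.
+    /// let all_entries: KvResult<Vec<_>> = col.iter(.., |it| it.collect());
+    /// ```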
+    fn iter<
+        D: Send + Sync,
+        R: 'static + RangeBounds<Self::K>,
+        F: FnOnce(
+            KvIter<
+                Self::BackendCol,
+                <Self::BackendCol as BackendCol>::KeyBytes,
+                <Self::BackendCol as BackendCol>::ValueBytes,
+                <Self::BackendCol as BackendCol>::Iter,
+                Self::K,
+                Self::V,
+            >,
+        ) -> D,
+    >(
+        &self,
+        range: R,
+        f: F,
+    ) -> D;
+    /// Iterate in reverse order. Don't worry about the complex iterator type: use it like an `impl Iterator<Item = KvResult<(K, V)>>`.
+    fn iter_rev<
+        D: Send + Sync,
+        R: 'static + RangeBounds<Self::K>,
+        F: FnOnce(
+            KvIter<
+                Self::BackendCol,
+                <Self::BackendCol as BackendCol>::KeyBytes,
+                <Self::BackendCol as BackendCol>::ValueBytes,
+                <Self::BackendCol as BackendCol>::Iter,
+                Self::K,
+                Self::V,
+            >,
+        ) -> D,
+    >(
+        &self,
+        range: R,
+        f: F,
+    ) -> D;
+    fn subscribe(&self, subscriber_sender: Subscriber<Self::Event>) -> KvResult<()>;
+}
+
+#[cfg(feature = "mock")]
+mockall::mock! {
+    pub ColRo<E: EventTrait> {}
+    trait DbCollectionRo {
+        type BackendCol = MockBackendCol;
+        type K = E::K;
+        type V = E::V;
+        type Event = E;
+
+        fn count(&self) -> KvResult<usize>;
+        fn get(&self, k: &E::K) -> KvResult<Option<E::V>>;
+        fn iter<R: 'static + RangeBounds<E::K>>(&self, range: R)
+        -> KvIter<MockBackendCol, MockBackendIter, E::K, E::V>;
+        fn subscribe(&self, subscriber_sender: Subscriber<E>) -> KvResult<()>;
+    }
+}
+
+type ColRoReader<'r, BC, E> = parking_lot::RwLockReadGuard<'r, ColInner<BC, E>>;
+
+#[derive(Debug)]
+pub struct ColRo<BC: BackendCol, E: EventTrait> {
+    pub(crate) inner: Arc<parking_lot::RwLock<ColInner<BC, E>>>,
+    pub(crate) subscription_sender: SubscriptionsSender<E>,
+}
+
+impl<BC: BackendCol, E: EventTrait> Clone for ColRo<BC, E> {
+    fn clone(&self) -> Self {
+        Self {
+            inner: Arc::clone(&self.inner),
+            subscription_sender: self.subscription_sender.clone(),
+        }
+    }
+}
+impl<BC: BackendCol, E: EventTrait> DbCollectionRo for ColRo<BC, E> {
+    type BackendCol = BC;
+    type K = E::K;
+    type V = E::V;
+    type Event = E;
+
+    #[inline(always)]
+    fn contains_key(&self, k: &Self::K) -> KvResult<bool> {
+        let r = self.inner.read();
+        r.backend_col.contains_key(k)
+    }
+    #[inline(always)]
+    fn count(&self) -> KvResult<usize> {
+        let r = self.inner.read();
+        r.backend_col.count()
+    }
+    #[inline(always)]
+    fn get(&self, k: &Self::K) -> KvResult<Option<Self::V>> {
+        let r = self.inner.read();
+        r.backend_col.get(k)
+    }
+    #[inline(always)]
+    fn iter<
+        D: Send + Sync,
+        R: 'static + RangeBounds<Self::K>,
+        F: FnOnce(
+            KvIter<
+                Self::BackendCol,
+                <Self::BackendCol as BackendCol>::KeyBytes,
+                <Self::BackendCol as BackendCol>::ValueBytes,
+                <Self::BackendCol as BackendCol>::Iter,
+                Self::K,
+                Self::V,
+            >,
+        ) -> D,
+    >(
+        &self,
+        range: R,
+        f: F,
+    ) -> D {
+        let range: RangeBytes = crate::iter::convert_range::<Self::K, R>(range);
+        let r = self.inner.read();
+        let iter = r.backend_col.iter::<Self::K, Self::V>(range);
+        f(KvIter::new(iter))
+    }
+    #[inline(always)]
+    fn iter_rev<
+        D: Send + Sync,
+        R: 'static + RangeBounds<Self::K>,
+        F: FnOnce(
+            KvIter<
+                Self::BackendCol,
+                <Self::BackendCol as BackendCol>::KeyBytes,
+                <Self::BackendCol as BackendCol>::ValueBytes,
+                <Self::BackendCol as BackendCol>::Iter,
+                Self::K,
+                Self::V,
+            >,
+        ) -> D,
+    >(
+        &self,
+        range: R,
+        f: F,
+    ) -> D {
+        let range: RangeBytes = crate::iter::convert_range::<Self::K, R>(range);
+        let r = self.inner.read();
+        let iter = r.backend_col.iter::<Self::K, Self::V>(range).reverse();
+        f(KvIter::new(iter))
+    }
+    #[inline(always)]
+    fn subscribe(&self, subscriber_sender: Subscriber<Self::Event>) -> KvResult<()> {
+        self.subscription_sender
+            .try_send(subscriber_sender)
+            .map_err(|_| KvError::FailToSubscribe)
+    }
+}
+
+pub trait DbCollectionRoGetRef<V: ValueZc>: DbCollectionRo<V = V> {
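+    /// Apply `f` to a zero-copy reference (`V::Ref`) over the raw value bytes.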
+    fn get_ref<D, F: Fn(&V::Ref) -> KvResult<D>>(
+        &self,
+        k: &<Self as DbCollectionRo>::K,
+        f: F,
+    ) -> KvResult<Option<D>>;
+}
+
+impl<V: ValueZc, BC: BackendCol, E: EventTrait<V = V>> DbCollectionRoGetRef<V> for ColRo<BC, E> {
+    fn get_ref<D, F: Fn(&V::Ref) -> KvResult<D>>(&self, k: &E::K, f: F) -> KvResult<Option<D>> {
+        let r = self.inner.read();
+        r.backend_col.get_ref::<E::K, V, D, F>(k, f)
+    }
+}
+
+pub trait DbCollectionRoGetRefSlice<V: ValueSliceZc>: DbCollectionRo<V = V> {
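+    /// Apply `f` to the raw value bytes viewed as a `&[V::Elem]` slice, without deserializing.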
+    fn get_ref_slice<D, F: Fn(&[V::Elem]) -> KvResult<D>>(
+        &self,
+        k: &<Self as DbCollectionRo>::K,
+        f: F,
+    ) -> KvResult<Option<D>>;
+}
+
+impl<V: ValueSliceZc, BC: BackendCol, E: EventTrait<V = V>> DbCollectionRoGetRefSlice<V>
+    for ColRo<BC, E>
+{
+    fn get_ref_slice<D, F: Fn(&[V::Elem]) -> KvResult<D>>(
+        &self,
+        k: &E::K,
+        f: F,
+    ) -> KvResult<Option<D>> {
+        let r = self.inner.read();
+        r.backend_col.get_ref_slice::<E::K, V, D, F>(k, f)
+    }
+}
+
+pub trait DbCollectionRoIterRefSlice<'r, BC: BackendCol, K: KeyZc, V: ValueSliceZc, READER>:
+    DbCollectionRo<K = K, V = V>
+{
+    fn iter_ref_slice<D, R, F>(&'r self, range: R, f: F) -> KvIterRefSlice<BC, D, K, V, F, READER>
+    where
+        K: KeyZc,
+        V: ValueSliceZc,
+        R: 'static + RangeBounds<K>,
+        F: FnMut(&K::Ref, &[V::Elem]) -> KvResult<D>;
+    fn iter_ref_slice_rev<D, R, F>(
+        &'r self,
+        range: R,
+        f: F,
+    ) -> KvIterRefSlice<BC, D, K, V, F, READER>
+    where
+        K: KeyZc,
+        V: ValueSliceZc,
+        R: 'static + RangeBounds<K>,
+        F: FnMut(&K::Ref, &[V::Elem]) -> KvResult<D>;
+}
+
+impl<'r, K: KeyZc, V: ValueSliceZc, BC: BackendCol, E: EventTrait<K = K, V = V>>
+    DbCollectionRoIterRefSlice<'r, BC, K, V, ColRoReader<'r, BC, E>> for ColRo<BC, E>
+{
+    fn iter_ref_slice<D, R, F>(
+        &'r self,
+        range: R,
+        f: F,
+    ) -> KvIterRefSlice<BC, D, K, V, F, ColRoReader<'r, BC, E>>
+    where
+        K: KeyZc,
+        V: ValueSliceZc,
+        R: 'static + RangeBounds<K>,
+        F: FnMut(&K::Ref, &[V::Elem]) -> KvResult<D>,
+    {
+        let range: RangeBytes = crate::iter::convert_range::<Self::K, R>(range);
+        let reader = self.inner.read();
+        let inner_iter = reader.backend_col.iter_ref_slice::<D, K, V, F>(range, f);
+
+        KvIterRefSlice {
+            inner: inner_iter,
+            reader: OwnedOrRef::Owned(reader),
+        }
+    }
+
+    fn iter_ref_slice_rev<D, R, F>(
+        &'r self,
+        range: R,
+        f: F,
+    ) -> KvIterRefSlice<BC, D, K, V, F, ColRoReader<'r, BC, E>>
+    where
+        K: KeyZc,
+        V: ValueSliceZc,
+        R: 'static + RangeBounds<K>,
+        F: FnMut(&K::Ref, &[V::Elem]) -> KvResult<D>,
+    {
+        let range: RangeBytes = crate::iter::convert_range::<Self::K, R>(range);
+        let reader = self.inner.read();
+        let inner_iter = reader
+            .backend_col
+            .iter_ref_slice::<D, K, V, F>(range, f)
+            .reverse();
+
+        KvIterRefSlice {
+            inner: inner_iter,
+            reader: OwnedOrRef::Owned(reader),
+        }
+    }
+}
diff --git a/tools/kv_typed/src/collection_rw.rs b/tools/kv_typed/src/collection_rw.rs
new file mode 100644
index 0000000000000000000000000000000000000000..13ab15956d5c3da15e874a41fc0dfbe3ba19178b
--- /dev/null
+++ b/tools/kv_typed/src/collection_rw.rs
@@ -0,0 +1,100 @@
+use crate::*;
+use parking_lot::{
+    RwLockUpgradableReadGuard as UpgradableReadGuard, RwLockWriteGuard as WriteGuard,
+};
+
+pub trait DbCollectionRw {
+    type K: Key;
+    type V: Value;
+    type Event: EventTrait<K = Self::K, V = Self::V>;
+
+    fn clear(&self) -> KvResult<()>;
+    fn remove(&self, k: Self::K) -> KvResult<()>;
+    fn save(&self) -> KvResult<()>;
+    fn upsert(&self, k: Self::K, v: Self::V) -> KvResult<()>;
+}
+
+#[derive(Debug)]
+pub struct ColRw<BC: BackendCol, E: EventTrait> {
+    pub(crate) inner: ColRo<BC, E>,
+}
+
+impl<BC: BackendCol, E: EventTrait> Clone for ColRw<BC, E> {
+    fn clone(&self) -> Self {
+        Self {
+            inner: self.inner.clone(),
+        }
+    }
+}
+
+impl<BC: BackendCol, E: EventTrait> DbCollectionRw for ColRw<BC, E> {
+    type K = E::K;
+    type V = E::V;
+    type Event = E;
+
+    fn clear(&self) -> KvResult<()> {
+        let mut w = self.inner.inner.write();
+        w.backend_col.clear()?;
+        let events = smallvec::smallvec![E::clear()];
+        w.notify_subscribers(events);
+        Ok(())
+    }
+    fn remove(&self, k: Self::K) -> KvResult<()> {
+        let mut w = self.inner.inner.write();
+        w.backend_col.delete(&k)?;
+        let events = smallvec::smallvec![E::remove(k)];
+        w.notify_subscribers(events);
+        Ok(())
+    }
+    fn save(&self) -> KvResult<()> {
+        let w = self.inner.inner.write();
+        w.backend_col.save()?;
+        Ok(())
+    }
+    fn upsert(&self, k: Self::K, v: Self::V) -> KvResult<()> {
+        let mut w = self.inner.inner.write();
+        w.backend_col.put(&k, &v)?;
+        let events = smallvec::smallvec![E::upsert(k, v)];
+        w.notify_subscribers(events);
+        Ok(())
+    }
+}
+
+impl<BC: BackendCol, E: EventTrait> ColRw<BC, E> {
+    pub fn new(backend_col: BC) -> Self {
+        let (col_inner, subscription_sender) = ColInner::new(backend_col);
+        Self {
+            inner: ColRo {
+                inner: Arc::new(parking_lot::RwLock::new(col_inner)),
+                subscription_sender,
+            },
+        }
+    }
+    pub fn to_ro(&self) -> &ColRo<BC, E> {
+        &self.inner
+    }
+    #[doc(hidden)]
+    /// For internal use only. MUST NOT be called by user code.
+    pub fn upgradable_read(&self) -> UpgradableReadGuard<'_, ColInner<BC, E>> {
+        self.inner.inner.upgradable_read()
+    }
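+    /// Apply every operation recorded in `batch` under a single write lock,
+    /// then notify subscribers with the corresponding events.
+    ///
+    /// Illustrative sketch (assumes `Batch` exposes `upsert`/`remove`, as used
+    /// elsewhere in this crate):
+    ///
+    /// ```ignore
+    /// let mut batch = Batch::default();
+    /// batch.upsert(key, value);
+    /// batch.remove(other_key);
+    /// col_rw.write_batch(batch)?;
+    /// ```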
+    pub fn write_batch(&self, batch: Batch<BC, Self>) -> KvResult<()> {
+        let (backend_batch, events) = batch.into_backend_batch_and_events();
+        let mut w = self.inner.inner.write();
+        w.backend_col.write_batch(backend_batch)?;
+        w.notify_subscribers(events);
+        Ok(())
+    }
+    #[doc(hidden)]
+    /// For internal use only. MUST NOT be called by user code.
+    pub fn write_backend_batch(
+        &self,
+        backend_batch: BC::Batch,
+        events: Events<E>,
+        write_guard: &mut WriteGuard<ColInner<BC, E>>,
+    ) -> KvResult<()> {
+        write_guard.backend_col.write_batch(backend_batch)?;
+        write_guard.notify_subscribers(events);
+        Ok(())
+    }
+}
diff --git a/tools/kv_typed/src/db_schema.rs b/tools/kv_typed/src/db_schema.rs
new file mode 100644
index 0000000000000000000000000000000000000000..657d7797dcd12e1d075ed1a83e69c3e5a589181c
--- /dev/null
+++ b/tools/kv_typed/src/db_schema.rs
@@ -0,0 +1,242 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
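+/// Generates a typed database schema from a list of collections: per-collection
+/// event enums, the `<Name>Db` / `<Name>DbRo` handles, the read/write traits and
+/// the batch and transaction types, all inside a private `__inner` module.
+///
+/// Illustrative invocation (collection names and key/value types are hypothetical):
+///
+/// ```ignore
+/// db_schema!(
+///     Example,
+///     [
+///         ["balances", Balances, U32BE, u64],
+///         ["metadata", Metadata, String, String],
+///     ]
+/// );
+/// // Generates `ExampleDb`, `ExampleDbRo`, `ExampleDbWritable`, `ExampleDbReadable`
+/// // and `ExampleDbTxRw`, plus the `BalancesEvent` and `MetadataEvent` enums.
+/// ```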
+#[macro_export]
+macro_rules! db_schema {
+    ($db_name:ident, [ $([$col_path:literal, $col_name:ident, $K:ty, $V:ty]),*, ]) => {
+        paste::paste! {
+            $(
+                // Define each collection event type
+                #[derive(Debug, PartialEq)]
+                pub enum [<$col_name Event>] {
+                    Upsert { key: $K, value: $V },
+                    Remove { key: $K },
+                    RemoveAll,
+                }
+                impl kv_typed::prelude::EventTrait for [<$col_name Event>] {
+                    type K = $K;
+                    type V = $V;
+
+                    fn clear() -> Self { Self::RemoveAll }
+                    fn upsert(k: Self::K, v: Self::V) -> Self { Self::Upsert { key: k, value: v, } }
+                    fn remove(k: Self::K) -> Self { Self::Remove { key: k } }
+                }
+            )*
+            // Inner module used to hide internal types that must not be exposed in the public API
+            pub use __inner::{[<$db_name Db>], [<$db_name DbRo>], [<$db_name DbWritable>], [<$db_name DbReadable>], [<$db_name DbTxRw>]};
+            mod __inner {
+                use super::*;
+                use kv_typed::prelude::*;
+                // DbCollections
+                #[derive(Clone, Debug)]
+                pub struct [<$db_name ColsRo>]<BC: BackendCol> {
+                    $([<$col_name:snake>]: ColRo<BC, [<$col_name Event>]>,)*
+                }
+                #[derive(Clone, Debug)]
+                pub struct [<$db_name ColsRw>]<BC: BackendCol> {
+                    $([<$col_name:snake>]: ColRw<BC, [<$col_name Event>]>,)*
+                }
+                impl<BC: BackendCol> [<$db_name ColsRw>]<BC> {
+                    fn to_ro(&self) -> [<$db_name ColsRo>]<BC> {
+                        [<$db_name ColsRo>] {
+                            $([<$col_name:snake>]: self.[<$col_name:snake>].to_ro().clone(),)*
+                        }
+                    }
+                }
+                // Db
+                #[derive(Debug)]
+                pub struct [<$db_name Db>]<B: Backend> {
+                    collections: [<$db_name ColsRw>]<B::Col>,
+                }
+                impl<B: Backend> [<$db_name Db>]<B> {
+                    pub const NAME: &'static str = stringify!([<$db_name:snake>]);
+                }
+                impl<B: Backend> Clone for [<$db_name Db>]<B> {
+                    fn clone(&self) -> Self {
+                        [<$db_name Db>] {
+                            collections: self.collections.clone(),
+                        }
+                    }
+                }
+                #[cfg(feature = "explorer")]
+                impl<B: Backend> kv_typed::explorer::DbExplorable for [<$db_name Db>]<B> {
+                    fn explore<'a>(
+                        &self,
+                        collection_name: &str,
+                        action: kv_typed::explorer::ExplorerAction<'a>,
+                        stringify_json_value: fn(serde_json::Value) -> serde_json::Value,
+                    ) -> KvResult<std::result::Result<kv_typed::explorer::ExplorerActionResponse, ExplorerActionErr>> {
+                        $( if stringify!([<$col_name:snake>]) == collection_name {
+                            return action.exec(&self.collections.[<$col_name:snake>], stringify_json_value);
+                        } )*
+                        Ok(Err(ExplorerActionErr(format!("collection '{}' does not exist in database '{}'.", collection_name, stringify!([<$db_name Db>])).into())))
+                    }
+                    fn list_collections() -> Vec<(&'static str, &'static str, &'static str)> {
+                        vec![
+                            $((stringify!([<$col_name:snake>]), stringify!($K), stringify!($V)),)*
+                        ]
+                    }
+                }
+                // Batch
+                pub struct [<$db_name DbBatch>]<B: Backend> {
+                    $([<$col_name:snake>]: Batch<B::Col, ColRw<B::Col, [<$col_name Event>]>>,)*
+                }
+                impl<B: Backend> Default for [<$db_name DbBatch>]<B> {
+                    fn default() -> Self {
+                        [<$db_name DbBatch>] {
+                            $([<$col_name:snake>]: Batch::default(),)*
+                        }
+                    }
+                }
+                impl<B: Backend> [<$db_name DbBatch>]<B> {
+                    $(pub fn [<$col_name:snake>](&mut self) -> &mut Batch<B::Col, ColRw<B::Col, [<$col_name Event>]>> { &mut self.[<$col_name:snake>] })*
+                }
+                // impl TransactionalWrite for Db
+                #[derive(Debug)]
+                pub struct [<$db_name DbTxRw>]<'tx, BC: BackendCol> {
+                    $(pub [<$col_name:snake>]: TxColRw<'tx, BC, [<$col_name Event>]>,)*
+                }
+                impl<'tx, B: Backend> TransactionalWrite<'tx, B::Col> for &'tx [<$db_name Db>]<B> {
+                    type TxCols = [<$db_name DbTxRw>]<'tx, B::Col>;
+
+                    fn write<D, F: FnOnce(Self::TxCols) -> KvResult<D>>(&'tx self, f: F) -> KvResult<D> {
+                        $(let [<$col_name:snake _upgradable_guard>] = self.collections.[<$col_name:snake>].upgradable_read();)*
+
+                        $(let mut [<$col_name:snake _batch>] = Batch::<B::Col, ColRw<B::Col, [<$col_name Event>]>>::default();)*
+
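+                        // Note: these `transmute`s erase borrow lifetimes so that the
+                        // locally created batches and upgradable guards (which outlive
+                        // the call to `f` below) can be stored inside `TxColRw`.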
+                        let db_tx = [<$db_name DbTxRw>] {
+                            $([<$col_name:snake>]: TxColRw::new(
+                                unsafe { std::mem::transmute(&mut [<$col_name:snake _batch>]) },
+                                unsafe { std::mem::transmute(&[<$col_name:snake _upgradable_guard>]) },
+                            ),)*
+                        };
+
+                        let data = f(db_tx)?;
+
+                        // Prepare commit
+                        $(let ([<$col_name:snake _backend_batch>], [<$col_name:snake _events>]) = [<$col_name:snake _batch>].into_backend_batch_and_events();)*
+
+                        // Acquire exclusive lock
+                        $(let mut [<$col_name:snake _write_guard>] = parking_lot::RwLockUpgradableReadGuard::upgrade([<$col_name:snake _upgradable_guard>]);)*
+
+                        // Commit
+                        $(self.collections.[<$col_name:snake>].write_backend_batch(
+                            [<$col_name:snake _backend_batch>],
+                            [<$col_name:snake _events>],
+                            &mut [<$col_name:snake _write_guard>],
+                        )?;)*
+
+                        Ok(data)
+                    }
+                }
+                // DbRo
+                #[derive(Debug)]
+                pub struct [<$db_name DbRo>]<B: Backend> {
+                    collections: [<$db_name ColsRo>]<B::Col>,
+                }
+                impl<B: Backend> [<$db_name DbRo>]<B> {
+                    pub const NAME: &'static str = stringify!([<$db_name:snake>]);
+                }
+                impl<B: Backend> Clone for [<$db_name DbRo>]<B> {
+                    fn clone(&self) -> Self {
+                        [<$db_name DbRo>] {
+                            collections: self.collections.clone(),
+                        }
+                    }
+                }
+                // Read operations
+                pub trait [<$db_name DbReadable>]: Sized {
+                    type Backend: Backend;
+
+                    $(fn [<$col_name:snake>](&self) -> &ColRo<<Self::Backend as Backend>::Col, [<$col_name Event>]>;)*
+                }
+                impl<B: Backend> [<$db_name DbReadable>] for [<$db_name Db>]<B> {
+                    type Backend = B;
+
+                    $(fn [<$col_name:snake>](&self) -> &ColRo<B::Col, [<$col_name Event>]> { &self.collections.[<$col_name:snake>].to_ro() })*
+                }
+                impl<B: Backend> [<$db_name DbReadable>] for [<$db_name DbRo>]<B>{
+                    type Backend = B;
+
+                    $(fn [<$col_name:snake>](&self) -> &ColRo<B::Col, [<$col_name Event>]> { &self.collections.[<$col_name:snake>] })*
+                }
+                // Write operations
+                pub trait [<$db_name DbWritable>]: [<$db_name DbReadable>] {
+                    type Backend: Backend;
+                    type Batch;
+                    $(type [<$col_name ColRw>]: DbCollectionRw;)*
+                    type DbRo: Sized;
+
+                    fn clear(&self) -> KvResult<()>;
+                    fn get_ro_handler(&self) -> Self::DbRo;
+                    fn open(
+                        backend_conf: <<Self as [<$db_name DbWritable>]>::Backend as kv_typed::backend::Backend>::Conf,
+                    ) -> KvResult <Self>;
+                    fn new_batch(&self) -> Self::Batch;
+                    fn save(&self) -> KvResult<()>;
+                    fn write_batch(&self, batch: Self::Batch) -> KvResult<()>;
+                    $(fn [<$col_name:snake _write>](&self) -> &Self::[<$col_name ColRw>];)*
+                }
+                impl<B: Backend> [<$db_name DbWritable>] for [<$db_name Db>]<B> {
+                    type Backend = B;
+                    type Batch = [<$db_name DbBatch>]<B>;
+                    $(type [<$col_name ColRw>] = ColRw<B::Col, [<$col_name Event>]>;)*
+                    type DbRo = [<$db_name DbRo>]<B>;
+
+                    #[inline(always)]
+                    fn clear(&self) -> KvResult<()> {
+                        $(self.collections.[<$col_name:snake>].clear()?;)*
+                        Ok(())
+                    }
+                    #[inline(always)]
+                    fn get_ro_handler(&self) -> Self::DbRo {
+                        [<$db_name DbRo>] {
+                            collections: self.collections.to_ro(),
+                        }
+                    }
+                    #[inline(always)]
+                    fn new_batch(&self) -> Self::Batch {
+                        <[<$db_name DbBatch>]::<B>>::default()
+                    }
+                    fn write_batch(&self, batch: Self::Batch) -> KvResult<()> {
+                        $(self.collections.[<$col_name:snake>].write_batch(batch.[<$col_name:snake>])?;)*
+                        Ok(())
+                    }
+                    fn open(
+                        backend_conf: <<Self as [<$db_name DbWritable>]>::Backend as kv_typed::backend::Backend>::Conf,
+                    ) -> KvResult <Self> {
+                        let mut db = B::open(&backend_conf)?;
+                        Ok([<$db_name Db>] {
+                            collections: [<$db_name ColsRw>] {
+                                $([<$col_name:snake>]: <ColRw<B::Col, [<$col_name Event>]>>::new(
+                                    db.open_col(&backend_conf, $col_path)?
+                                ),)*
+                            },
+                        })
+                    }
+                    #[inline(always)]
+                    fn save(&self) -> KvResult<()> {
+                        $(self.collections.[<$col_name:snake>].save()?;)*
+                        Ok(())
+                    }
+                    $(
+                        #[inline(always)]
+                        fn [<$col_name:snake _write>](&self) -> &ColRw<B::Col, [<$col_name Event>]> { &self.collections.[<$col_name:snake>] }
+                    )*
+                }
+            }
+        }
+    };
+}
diff --git a/tools/kv_typed/src/error.rs b/tools/kv_typed/src/error.rs
new file mode 100644
index 0000000000000000000000000000000000000000..8762ef816b6d4c47e487d5a787678aab6246781f
--- /dev/null
+++ b/tools/kv_typed/src/error.rs
@@ -0,0 +1,75 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed error type
+
+use crate::*;
+
+pub type DynErr = Box<dyn Error + Send + Sync + 'static>;
+
+/// KV Typed result
+pub type KvResult<T> = Result<T, KvError>;
+
+#[allow(type_alias_bounds)]
+pub(crate) type BackendResult<BC: BackendCol> =
+    Result<(<BC as BackendCol>::KeyBytes, <BC as BackendCol>::ValueBytes), DynErr>;
+
+/// KV Typed error
+#[derive(Debug, Error)]
+pub enum KvError {
+    /// Backend error
+    #[error("Backend error: {0}")]
+    BackendError(DynErr),
+    /// Custom
+    #[error("{0}")]
+    Custom(DynErr),
+    /// DB corrupted
+    #[error("DB corrupted: {0}")]
+    DbCorrupted(String),
+    /// Deserialization error
+    #[error("DeserError: {0}")]
+    DeserError(DynErr),
+    /// FailToCreateDbFolder
+    #[error("FailToCreateDbFolder: {0}")]
+    FailToCreateDbFolder(std::io::Error),
+    /// FailToSubscribe
+    #[error("FailToSubscribe")]
+    FailToSubscribe,
+}
+
+impl From<std::io::Error> for KvError {
+    fn from(e: std::io::Error) -> Self {
+        KvError::BackendError(e.into())
+    }
+}
+
+#[cfg(feature = "leveldb_backend")]
+impl From<crate::backend::leveldb::LevelDbError> for KvError {
+    fn from(e: crate::backend::leveldb::LevelDbError) -> Self {
+        KvError::BackendError(Box::new(e).into())
+    }
+}
+#[cfg(feature = "lmdb_backend")]
+impl From<lmdb_zero::Error> for KvError {
+    fn from(e: lmdb_zero::Error) -> Self {
+        KvError::BackendError(e.into())
+    }
+}
+#[cfg(feature = "sled_backend")]
+impl From<sled::Error> for KvError {
+    fn from(e: sled::Error) -> Self {
+        KvError::BackendError(Box::new(e).into())
+    }
+}
diff --git a/tools/kv_typed/src/event.rs b/tools/kv_typed/src/event.rs
new file mode 100644
index 0000000000000000000000000000000000000000..0a2c79abe82bc8ab9cea316804bd8095eacb2e47
--- /dev/null
+++ b/tools/kv_typed/src/event.rs
@@ -0,0 +1,31 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed event
+
+use crate::*;
+
+/// Database events
+pub type Events<E> = SmallVec<[E; 4]>;
+
+/// Event trait
+pub trait EventTrait: 'static + Debug + PartialEq + Send + Sync {
+    type K: Key;
+    type V: Value;
+
+    fn clear() -> Self;
+    fn upsert(k: Self::K, v: Self::V) -> Self;
+    fn remove(k: Self::K) -> Self;
+}
diff --git a/tools/kv_typed/src/explorer.rs b/tools/kv_typed/src/explorer.rs
new file mode 100644
index 0000000000000000000000000000000000000000..e23b2a053b30b735af68898506311f7322274a00
--- /dev/null
+++ b/tools/kv_typed/src/explorer.rs
@@ -0,0 +1,529 @@
+use crate::*;
+use rayon::{iter::ParallelBridge, prelude::*};
+use std::num::NonZeroUsize;
+
+pub trait DbExplorable {
+    fn explore<'a>(
+        &self,
+        collection_name: &str,
+        action: ExplorerAction<'a>,
+        stringify_json_value: fn(serde_json::Value) -> serde_json::Value,
+    ) -> KvResult<Result<ExplorerActionResponse, ExplorerActionErr>>;
+    fn list_collections() -> Vec<(&'static str, &'static str, &'static str)>;
+}
+
+#[derive(Debug, Error)]
+#[error("Fail to parse key: {0}")]
+pub struct FromExplorerKeyErr(pub DynErr);
+
+#[derive(Debug, Error)]
+#[error("Fail to parse value: {0}")]
+pub struct FromExplorerValueErr(pub DynErr);
+
+pub trait ExplorableKey: Sized {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr>;
+    fn to_explorer_string(&self) -> KvResult<String>;
+}
+
+impl ExplorableKey for () {
+    fn from_explorer_str(_: &str) -> Result<Self, FromExplorerKeyErr> {
+        Ok(())
+    }
+
+    fn to_explorer_string(&self) -> KvResult<String> {
+        Ok(String::with_capacity(0))
+    }
+}
+
+impl ExplorableKey for String {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+        Ok(source.to_owned())
+    }
+
+    fn to_explorer_string(&self) -> KvResult<String> {
+        Ok(self.clone())
+    }
+}
+
+macro_rules! impl_explorable_key_for_numbers {
+    ($($T:ty),*) => {$(
+        impl ExplorableKey for $T {
+            fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+                source.parse().map_err(|e| FromExplorerKeyErr(Box::new(e)))
+            }
+
+            fn to_explorer_string(&self) -> KvResult<String> {
+                Ok(format!("{}", self))
+            }
+        }
+    )*};
+}
+impl_explorable_key_for_numbers!(usize, u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, f32, f64);
+
+macro_rules! impl_explorable_key_for_be_numbers {
+    ($($T:ty),*) => {$(
+        impl ExplorableKey for $T {
+            fn from_explorer_str(source: &str) -> Result<Self, FromExplorerKeyErr> {
+                Ok(Self(source.parse().map_err(|e| FromExplorerKeyErr(Box::new(e)))?))
+            }
+
+            fn to_explorer_string(&self) -> KvResult<String> {
+                Ok(format!("{}", self.0))
+            }
+        }
+    )*};
+}
+impl_explorable_key_for_be_numbers!(U32BE, U64BE);
+
+pub trait ExplorableValue: Sized {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr>;
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value>;
+}
+
+impl ExplorableValue for () {
+    fn from_explorer_str(_: &str) -> Result<Self, FromExplorerValueErr> {
+        Ok(())
+    }
+
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        Ok(serde_json::Value::String(String::with_capacity(0)))
+    }
+}
+
+impl ExplorableValue for String {
+    fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+        Ok(source.to_owned())
+    }
+
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        Ok(serde_json::Value::String(self.clone()))
+    }
+}
+
+macro_rules! impl_explorable_value_for_numbers {
+    ($($T:ty),*) => {$(
+        impl ExplorableValue for $T {
+            fn from_explorer_str(source: &str) -> Result<Self, FromExplorerValueErr> {
+                source.parse().map_err(|e| FromExplorerValueErr(Box::new(e)))
+            }
+
+            #[allow(trivial_numeric_casts)]
+            fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+                Ok(serde_json::Value::Number(serde_json::Number::from_f64(*self as f64).expect("number not representable in JSON")))
+            }
+        }
+    )*};
+}
+
+impl_explorable_value_for_numbers!(
+    usize, u8, u16, u32, u64, u128, isize, i8, i16, i32, i64, i128, f32, f64
+);
+
+impl<T, E> ExplorableValue for Vec<T>
+where
+    T: Display + FromStr<Err = E>,
+    E: Error + Send + Sync + 'static,
+{
+    fn from_explorer_str(source: &str) -> Result<Vec<T>, FromExplorerValueErr> {
+        if let serde_json::Value::Array(json_array) =
+            serde_json::Value::from_str(source).map_err(|e| FromExplorerValueErr(e.into()))?
+        {
+            let mut vec = Vec::with_capacity(json_array.len());
+            for value in json_array {
+                if let serde_json::Value::String(string) = value {
+                    vec.push(<T>::from_str(&string).map_err(|e| FromExplorerValueErr(e.into()))?);
+                } else {
+                    return Err(FromExplorerValueErr(
+                        format!("Expected array of {}.", stringify!(T)).into(),
+                    ));
+                }
+            }
+            Ok(vec)
+        } else {
+            Err(FromExplorerValueErr(
+                format!("Expected array of {}.", stringify!(T)).into(),
+            ))
+        }
+    }
+
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        Ok(serde_json::Value::Array(
+            self.iter()
+                .map(|elem| serde_json::Value::String(format!("{}", elem)))
+                .collect(),
+        ))
+    }
+}
+
+macro_rules! impl_explorable_value_for_smallvec {
+    ($($N:literal),*) => {$(
+        impl<T, E> ExplorableValue for SmallVec<[T; $N]>
+        where
+            T: Display + FromStr<Err = E>,
+            E: Error + Send + Sync + 'static,
+        {
+            fn from_explorer_str(source: &str) -> Result<SmallVec<[T; $N]>, FromExplorerValueErr> {
+                if let serde_json::Value::Array(json_array) =
+                    serde_json::Value::from_str(source).map_err(|e| FromExplorerValueErr(e.into()))?
+                {
+                    let mut svec = SmallVec::with_capacity(json_array.len());
+                    for value in json_array {
+                        if let serde_json::Value::String(string) = value {
+                            svec.push(<T>::from_str(&string).map_err(|e| FromExplorerValueErr(e.into()))?);
+                        } else {
+                            return Err(FromExplorerValueErr(format!("Expected array of {}.", stringify!(T)).into()));
+                        }
+                    }
+                    Ok(svec)
+                } else {
+                    Err(FromExplorerValueErr(format!("Expected array of {}.", stringify!(T)).into()))
+                }
+            }
+
+            fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+                Ok(serde_json::Value::Array(
+                    self.iter()
+                        .map(|elem| serde_json::Value::String(format!("{}", elem)))
+                        .collect(),
+                ))
+            }
+        }
+    )*};
+}
+impl_explorable_value_for_smallvec!(2, 4, 8, 16, 32, 64);
+
+impl<T, E> ExplorableValue for BTreeSet<T>
+where
+    T: Display + FromStr<Err = E> + Ord,
+    E: Error + Send + Sync + 'static,
+{
+    fn from_explorer_str(source: &str) -> Result<BTreeSet<T>, FromExplorerValueErr> {
+        if let serde_json::Value::Array(json_array) =
+            serde_json::Value::from_str(source).map_err(|e| FromExplorerValueErr(e.into()))?
+        {
+            let mut bt_set = BTreeSet::new();
+            for value in json_array {
+                if let serde_json::Value::String(string) = value {
+                    bt_set.insert(
+                        <T>::from_str(&string).map_err(|e| FromExplorerValueErr(e.into()))?,
+                    );
+                } else {
+                    return Err(FromExplorerValueErr(
+                        format!("Expected array of {}.", stringify!(T)).into(),
+                    ));
+                }
+            }
+            Ok(bt_set)
+        } else {
+            Err(FromExplorerValueErr(
+                format!("Expected array of {}.", stringify!(T)).into(),
+            ))
+        }
+    }
+
+    fn to_explorer_json(&self) -> KvResult<serde_json::Value> {
+        Ok(serde_json::Value::Array(
+            self.iter()
+                .map(|elem| serde_json::Value::String(format!("{}", elem)))
+                .collect(),
+        ))
+    }
+}
+
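+/// Action that the generic DB explorer can execute on one collection.
+///
+/// Illustrative sketch of a `Find` query (field values are arbitrary):
+///
+/// ```ignore
+/// let action = ExplorerAction::Find {
+///     key_min: Some("100".to_owned()),
+///     key_max: None,
+///     key_regex: None,
+///     value_regex: None,
+///     limit: Some(10),
+///     reverse: false,
+///     step: std::num::NonZeroUsize::new(1).expect("1 is non-zero"),
+/// };
+/// ```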
+#[derive(Debug)]
+pub enum ExplorerAction<'a> {
+    Count,
+    Get {
+        key: &'a str,
+    },
+    Find {
+        key_min: Option<String>,
+        key_max: Option<String>,
+        key_regex: Option<regex::Regex>,
+        value_regex: Option<regex::Regex>,
+        limit: Option<usize>,
+        reverse: bool,
+        step: NonZeroUsize,
+    },
+    Put {
+        key: &'a str,
+        value: &'a str,
+    },
+    Delete {
+        key: &'a str,
+    },
+}
+
+#[derive(Debug, PartialEq)]
+pub struct EntryFound {
+    pub key: String,
+    pub value: serde_json::Value,
+    pub captures: Option<ValueCaptures>,
+}
+
+#[derive(Debug, PartialEq)]
+pub struct ValueCaptures(pub SmallVec<[SmallVec<[Option<String>; 8]>; 8]>);
+
+#[derive(Debug, PartialEq)]
+pub enum ExplorerActionResponse {
+    Count(usize),
+    Get(Option<serde_json::Value>),
+    Find(Vec<EntryFound>),
+    PutOk,
+    DeleteOk,
+}
+
+#[derive(Debug, Error)]
+#[error("Fail to exec explorer action: {0}")]
+pub struct ExplorerActionErr(pub DynErr);
+impl From<FromExplorerKeyErr> for ExplorerActionErr {
+    fn from(e: FromExplorerKeyErr) -> Self {
+        ExplorerActionErr(e.0)
+    }
+}
+impl From<FromExplorerValueErr> for ExplorerActionErr {
+    fn from(e: FromExplorerValueErr) -> Self {
+        ExplorerActionErr(e.0)
+    }
+}
+
+impl<'a> ExplorerAction<'a> {
+    pub fn exec<BC: BackendCol, E: EventTrait>(
+        self,
+        col: &ColRw<BC, E>,
+        stringify_json_value: fn(serde_json::Value) -> serde_json::Value,
+    ) -> KvResult<Result<ExplorerActionResponse, ExplorerActionErr>> {
+        Ok(match self {
+            Self::Count => Ok(ExplorerActionResponse::Count(col.to_ro().count()?)),
+            Self::Get { key } => match E::K::from_explorer_str(key) {
+                Ok(k) => Ok(ExplorerActionResponse::Get(
+                    col.to_ro()
+                        .get(&k)?
+                        .map(|v| v.to_explorer_json())
+                        .transpose()?,
+                )),
+                Err(e) => Err(e.into()),
+            },
+            Self::Find {
+                key_min,
+                key_max,
+                key_regex,
+                value_regex,
+                limit,
+                reverse,
+                step,
+            } => match define_range::<E::K>(key_min, key_max) {
+                Ok(range) => Ok(ExplorerActionResponse::Find(match range {
+                    Range::Full => Self::get_range_inner(
+                        col.to_ro(),
+                        ..,
+                        key_regex,
+                        value_regex,
+                        limit,
+                        reverse,
+                        step,
+                        stringify_json_value,
+                    )?,
+                    Range::From(range) => Self::get_range_inner(
+                        col.to_ro(),
+                        range,
+                        key_regex,
+                        value_regex,
+                        limit,
+                        reverse,
+                        step,
+                        stringify_json_value,
+                    )?,
+                    Range::FromTo(range) => Self::get_range_inner(
+                        col.to_ro(),
+                        range,
+                        key_regex,
+                        value_regex,
+                        limit,
+                        reverse,
+                        step,
+                        stringify_json_value,
+                    )?,
+                    Range::To(range) => Self::get_range_inner(
+                        col.to_ro(),
+                        range,
+                        key_regex,
+                        value_regex,
+                        limit,
+                        reverse,
+                        step,
+                        stringify_json_value,
+                    )?,
+                })),
+                Err(e) => Err(ExplorerActionErr(e)),
+            },
+            Self::Put { key, value } => match E::K::from_explorer_str(key) {
+                Ok(k) => match E::V::from_explorer_str(value) {
+                    Ok(v) => {
+                        col.upsert(k, v)?;
+                        Ok(ExplorerActionResponse::PutOk)
+                    }
+                    Err(e) => Err(e.into()),
+                },
+                Err(e) => Err(e.into()),
+            },
+            Self::Delete { key } => match E::K::from_explorer_str(key) {
+                Ok(k) => {
+                    col.remove(k)?;
+                    Ok(ExplorerActionResponse::DeleteOk)
+                }
+                Err(e) => Err(e.into()),
+            },
+        })
+    }
+    #[allow(clippy::too_many_arguments)]
+    fn get_range_inner<BC: BackendCol, E: EventTrait, R: 'static + RangeBounds<E::K>>(
+        col: &ColRo<BC, E>,
+        range: R,
+        key_regex: Option<regex::Regex>,
+        value_regex: Option<regex::Regex>,
+        limit: Option<usize>,
+        reverse: bool,
+        step: NonZeroUsize,
+        stringify_json_value: fn(serde_json::Value) -> serde_json::Value,
+    ) -> KvResult<Vec<EntryFound>> {
+        let filter_map_closure = move |entry_res| {
+            stringify_and_filter_entry_res::<E::K, E::V>(
+                entry_res,
+                key_regex.as_ref(),
+                value_regex.as_ref(),
+                stringify_json_value,
+            )
+        };
+
+        if let Some(limit) = limit {
+            if reverse {
+                col.iter_rev(range, |iter| {
+                    iter.step_by(step.get())
+                        .filter_map(filter_map_closure)
+                        .take(limit)
+                        .collect()
+                })
+            } else {
+                col.iter(range, |iter| {
+                    iter.step_by(step.get())
+                        .filter_map(filter_map_closure)
+                        .take(limit)
+                        .collect()
+                })
+            }
+        } else {
+            {
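+                // No `limit`: stream every entry through an unbounded channel to a
+                // worker thread, which filters and stringifies them in parallel via
+                // rayon's `par_bridge`.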
+                let (send, recv) = unbounded();
+
+                let handler = std::thread::spawn(move || {
+                    let iter = recv.into_iter().step_by(step.get()).par_bridge();
+
+                    iter.filter_map(filter_map_closure).collect()
+                });
+
+                if reverse {
+                    col.iter_rev(range, |iter| {
+                        for entry_res in iter {
+                            if send.try_send(entry_res).is_err() {
+                                return handler.join().expect("child thread panic");
+                            }
+                        }
+                        drop(send);
+
+                        handler.join().expect("child thread panic")
+                    })
+                } else {
+                    col.iter(range, |iter| {
+                        for entry_res in iter {
+                            if send.try_send(entry_res).is_err() {
+                                return handler.join().expect("child thread panic");
+                            }
+                        }
+                        drop(send);
+
+                        handler.join().expect("child thread panic")
+                    })
+                }
+            }
+        }
+    }
+}
+
+enum Range<K> {
+    Full,
+    From(core::ops::RangeFrom<K>),
+    To(core::ops::RangeToInclusive<K>),
+    FromTo(core::ops::RangeInclusive<K>),
+}
+
+fn define_range<K: Key>(
+    key_min_opt: Option<String>,
+    key_max_opt: Option<String>,
+) -> Result<Range<K>, DynErr> {
+    if let Some(key_min) = key_min_opt {
+        let k_min = K::from_explorer_str(&key_min)?;
+        if let Some(key_max) = key_max_opt {
+            let k_max = K::from_explorer_str(&key_max)?;
+            Ok(Range::FromTo(core::ops::RangeInclusive::new(k_min, k_max)))
+        } else {
+            Ok(Range::From(core::ops::RangeFrom { start: k_min }))
+        }
+    } else if let Some(key_max) = key_max_opt {
+        let k_max = K::from_explorer_str(&key_max)?;
+        Ok(Range::To(core::ops::RangeToInclusive { end: k_max }))
+    } else {
+        Ok(Range::Full)
+    }
+}
+
+fn stringify_and_filter_entry_res<K: Key, V: Value>(
+    entry_res: KvResult<(K, V)>,
+    key_regex_opt: Option<&regex::Regex>,
+    value_regex_opt: Option<&regex::Regex>,
+    stringify_json_value: fn(serde_json::Value) -> serde_json::Value,
+) -> Option<KvResult<EntryFound>> {
+    match entry_res {
+        Ok((k, v)) => match k.to_explorer_string() {
+            Ok(key_string) => {
+                if let Some(key_regex) = key_regex_opt {
+                    if !key_regex.is_match(&key_string) {
+                        return None;
+                    }
+                }
+                match v.to_explorer_json() {
+                    Ok(mut value_json) => {
+                        value_json = stringify_json_value(value_json);
+                        let captures = if let Some(value_regex) = value_regex_opt {
+                            let value_string = value_json.to_string();
+                            if !value_regex.is_match(&value_string) {
+                                return None;
+                            }
+                            Some(ValueCaptures(
+                                value_regex
+                                    .captures_iter(&value_string)
+                                    .map(|caps| {
+                                        caps.iter()
+                                            .skip(1)
+                                            .map(|m_opt| m_opt.map(|m| m.as_str().to_owned()))
+                                            .collect::<SmallVec<[Option<String>; 8]>>()
+                                    })
+                                    .collect(),
+                            ))
+                        } else {
+                            None
+                        };
+                        Some(Ok(EntryFound {
+                            key: key_string,
+                            value: value_json,
+                            captures,
+                        }))
+                    }
+                    Err(e) => Some(Err(e)),
+                }
+            }
+            Err(e) => Some(Err(e)),
+        },
+        Err(e) => Some(Err(e)),
+    }
+}
diff --git a/tools/kv_typed/src/from_bytes.rs b/tools/kv_typed/src/from_bytes.rs
new file mode 100644
index 0000000000000000000000000000000000000000..138f9ff8cc732d972cf5f9012a22b56df234e928
--- /dev/null
+++ b/tools/kv_typed/src/from_bytes.rs
@@ -0,0 +1,127 @@
+use crate::*;
+
+#[derive(Clone, Copy, Debug, Error)]
+#[error("Corrupted DB: {0} bytes are wrong aligned or have invalid length")]
+pub struct LayoutVerifiedErr(pub &'static str);
+
+pub trait FromBytes: Sized {
+    type Err: Error + Send + Sync + 'static;
+
+    /// Create Self from bytes.
+    fn from_bytes(bytes: &[u8]) -> Result<Self, Self::Err>;
+}
+
+impl FromBytes for () {
+    type Err = std::convert::Infallible;
+
+    fn from_bytes(_: &[u8]) -> Result<Self, Self::Err> {
+        Ok(())
+    }
+}
+
+macro_rules! impl_from_bytes_for_numbers {
+    ($($T:ty),*) => {$(
+        impl FromBytes for $T {
+            type Err = std::array::TryFromSliceError;
+
+            fn from_bytes(bytes: &[u8]) -> Result<Self, Self::Err> {
+                Ok(<$T>::from_le_bytes(bytes.try_into()?))
+            }
+        }
+    )*};
+}
+impl_from_bytes_for_numbers!(
+    usize, u8, u16, u32, u64, u128, isize, i8, i16, i32, i64, i128, f32, f64
+);
+
+macro_rules! impl_from_bytes_for_be_numbers {
+    ($(($T:ty, $INT:ty)),*) => {$(
+        impl FromBytes for $T {
+            type Err = std::array::TryFromSliceError;
+
+            fn from_bytes(bytes: &[u8]) -> Result<Self, Self::Err> {
+                Ok(Self(<$INT>::from_be_bytes(bytes.try_into()?)))
+            }
+        }
+    )*};
+}
+impl_from_bytes_for_be_numbers!((U32BE, u32), (U64BE, u64));
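+
+// Note: plain integers above are decoded little-endian, while the `U32BE`/`U64BE`
+// wrappers are decoded big-endian (see `key.rs`), so that byte-ordered backends
+// iterate them in ascending numeric order.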
+
+impl FromBytes for String {
+    type Err = std::str::Utf8Error;
+
+    fn from_bytes(bytes: &[u8]) -> Result<Self, Self::Err> {
+        Ok(std::str::from_utf8(bytes)?.to_owned())
+    }
+}
+
+macro_rules! impl_from_bytes_for_smallvec {
+    ($($N:literal),*) => {$(
+        impl<T> FromBytes for SmallVec<[T; $N]>
+        where
+            T: Copy + zerocopy::FromBytes,
+        {
+            type Err = LayoutVerifiedErr;
+
+            fn from_bytes(bytes: &[u8]) -> Result<Self, Self::Err> {
+                let layout_verified = zerocopy::LayoutVerified::<_, [T]>::new_slice(bytes)
+                    .ok_or(LayoutVerifiedErr(stringify!(T)))?;
+                Ok(SmallVec::from_slice(layout_verified.into_slice()))
+            }
+        }
+    )*};
+}
+impl_from_bytes_for_smallvec!(1, 2, 4, 8, 16, 32, 64);
+
+impl<T> FromBytes for Vec<T>
+where
+    T: Copy + Default + zerocopy::FromBytes,
+{
+    type Err = LayoutVerifiedErr;
+
+    fn from_bytes(bytes: &[u8]) -> Result<Self, Self::Err> {
+        let layout_verified = zerocopy::LayoutVerified::<_, [T]>::new_slice(bytes)
+            .ok_or(LayoutVerifiedErr(stringify!(Vec<T>)))?;
+        let slice = layout_verified.into_slice();
+        let mut vec = Vec::with_capacity(slice.len());
+        vec.resize_with(slice.len(), Default::default);
+        vec.copy_from_slice(slice);
+        Ok(vec)
+    }
+}
+
+impl<T> FromBytes for BTreeSet<T>
+where
+    T: Copy + zerocopy::FromBytes + Ord,
+{
+    type Err = LayoutVerifiedErr;
+
+    fn from_bytes(bytes: &[u8]) -> Result<Self, Self::Err> {
+        let layout_verified = zerocopy::LayoutVerified::<_, [T]>::new_slice(bytes)
+            .ok_or(LayoutVerifiedErr(stringify!(BTreeSet<T>)))?;
+        let slice = layout_verified.into_slice();
+        Ok(slice.iter().copied().collect())
+    }
+}
+
+impl<T> FromBytes for HashSet<T>
+where
+    T: Copy + Eq + zerocopy::FromBytes + std::hash::Hash,
+{
+    type Err = LayoutVerifiedErr;
+
+    fn from_bytes(bytes: &[u8]) -> Result<Self, Self::Err> {
+        let layout_verified = zerocopy::LayoutVerified::<_, [T]>::new_slice(bytes)
+            .ok_or(LayoutVerifiedErr(stringify!(HashSet<T>)))?;
+        let slice = layout_verified.into_slice();
+        Ok(slice.iter().copied().collect())
+    }
+}
+
+impl FromBytes for IVec {
+    type Err = std::convert::Infallible;
+
+    fn from_bytes(bytes: &[u8]) -> Result<Self, Self::Err> {
+        Ok(Self::from(bytes))
+    }
+}
diff --git a/tools/kv_typed/src/iter.rs b/tools/kv_typed/src/iter.rs
new file mode 100644
index 0000000000000000000000000000000000000000..8671cb182b9e63aced1af0e4b1ad71c7d385b811
--- /dev/null
+++ b/tools/kv_typed/src/iter.rs
@@ -0,0 +1,214 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed iterators
+
+pub mod keys;
+pub mod values;
+
+use crate::*;
+
+pub trait ReversableIterator: Iterator + Sized {
+    fn reverse(self) -> Self;
+
+    #[inline(always)]
+    fn last(self) -> Option<Self::Item> {
+        self.reverse().next()
+    }
+}
+
+pub trait ResultIter<T, E>: Iterator<Item = Result<T, E>> + Sized {
+    #[inline(always)]
+    fn next_res(&mut self) -> Result<Option<T>, E> {
+        self.next().transpose()
+    }
+}
+impl<I, T, E> ResultIter<T, E> for I where I: Iterator<Item = Result<T, E>> + Sized {}
+
+pub type RangeBytes = (Bound<IVec>, Bound<IVec>);
+
+#[derive(Debug)]
+pub struct KvIter<
+    C: BackendCol,
+    KB: KeyBytes,
+    VB: ValueBytes,
+    BI: BackendIter<KB, VB>,
+    K: Key,
+    V: Value,
+> {
+    backend_iter: BI,
+    phantom: PhantomData<(C, KB, VB, K, V)>,
+}
+
+impl<C: BackendCol, KB: KeyBytes, VB: ValueBytes, BI: BackendIter<KB, VB>, K: Key, V: Value>
+    Iterator for KvIter<C, KB, VB, BI, K, V>
+{
+    type Item = KvResult<(K, V)>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self.backend_iter.next() {
+            Some(Ok((key_bytes, value_bytes))) => match K::from_bytes(key_bytes.as_ref()) {
+                Ok(key) => match V::from_bytes(value_bytes.as_ref()) {
+                    Ok(value) => Some(Ok((key, value))),
+                    Err(e) => Some(Err(KvError::DeserError(e.into()))),
+                },
+                Err(e) => Some(Err(KvError::DeserError(e.into()))),
+            },
+            Some(Err(e)) => Some(Err(KvError::BackendError(e))),
+            None => None,
+        }
+    }
+}
+
+impl<C: BackendCol, KB: KeyBytes, VB: ValueBytes, BI: BackendIter<KB, VB>, K: Key, V: Value>
+    KvIter<C, KB, VB, BI, K, V>
+{
+    pub fn new(backend_iter: BI) -> Self {
+        Self {
+            backend_iter,
+            phantom: PhantomData,
+        }
+    }
+}
+
+pub trait EntryIter {
+    type K: Key;
+    type V: Value;
+    type KeysIter: Iterator<Item = KvResult<Self::K>>;
+    type ValuesIter: Iterator<Item = KvResult<Self::V>>;
+
+    fn keys(self) -> Self::KeysIter;
+    fn values(self) -> Self::ValuesIter;
+}
+
+impl<C: BackendCol, KB: KeyBytes, VB: ValueBytes, BI: BackendIter<KB, VB>, K: Key, V: Value>
+    EntryIter for KvIter<C, KB, VB, BI, K, V>
+{
+    type K = K;
+    type V = V;
+    type KeysIter = KvIterKeys<C, KB, VB, BI, K>;
+    type ValuesIter = KvIterValues<C, KB, VB, BI, K, V>;
+
+    fn keys(self) -> KvIterKeys<C, KB, VB, BI, K> {
+        KvIterKeys::new(self.backend_iter)
+    }
+    fn values(self) -> KvIterValues<C, KB, VB, BI, K, V> {
+        KvIterValues::new(self.backend_iter)
+    }
+}
+
+pub(crate) fn convert_range<K: Key, RK: RangeBounds<K>>(range: RK) -> RangeBytes {
+    let range_start = convert_bound(range.start_bound());
+    let range_end = convert_bound(range.end_bound());
+    (range_start, range_end)
+}
+
+#[inline(always)]
+fn convert_bound<K: Key>(bound_key: Bound<&K>) -> Bound<IVec> {
+    match bound_key {
+        Bound::Included(key) => Bound::Included(key.as_bytes(|key_bytes| key_bytes.into())),
+        Bound::Excluded(key) => Bound::Excluded(key.as_bytes(|key_bytes| key_bytes.into())),
+        Bound::Unbounded => Bound::Unbounded,
+    }
+}
+
+#[allow(dead_code, missing_debug_implementations)]
+pub struct KvIterRefSlice<'db, BC, D, K, V, F, R>
+where
+    BC: BackendCol,
+    K: KeyZc,
+    V: ValueSliceZc,
+    F: FnMut(&K::Ref, &[V::Elem]) -> KvResult<D>,
+{
+    pub(crate) inner: KvInnerIterRefSlice<BC, D, K, V, F>,
+    pub(crate) reader: OwnedOrRef<'db, R>,
+}
+impl<'db, BC, D, K, V, F, R> Iterator for KvIterRefSlice<'db, BC, D, K, V, F, R>
+where
+    BC: BackendCol,
+    K: KeyZc,
+    V: ValueSliceZc,
+    F: FnMut(&K::Ref, &[V::Elem]) -> KvResult<D>,
+{
+    type Item = KvResult<D>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.inner.next()
+    }
+}
+
+#[allow(missing_debug_implementations)]
+pub struct KvInnerIterRefSlice<BC, D, K, V, F>
+where
+    BC: BackendCol,
+    K: KeyZc,
+    V: ValueSliceZc,
+    F: FnMut(&K::Ref, &[V::Elem]) -> KvResult<D>,
+{
+    pub(crate) backend_iter: BC::Iter,
+    pub(crate) f: F,
+    pub(crate) phantom: PhantomData<(D, K, V)>,
+}
+impl<BC, D, K, V, F> Iterator for KvInnerIterRefSlice<BC, D, K, V, F>
+where
+    BC: BackendCol,
+    K: KeyZc,
+    V: ValueSliceZc,
+    F: FnMut(&K::Ref, &[V::Elem]) -> KvResult<D>,
+{
+    type Item = KvResult<D>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self.backend_iter.next() {
+            Some(Ok((k_bytes, v_bytes))) => {
+                if let Some(k_layout) = zerocopy::LayoutVerified::<_, K::Ref>::new(k_bytes.as_ref())
+                {
+                    if let Some(v_layout) = zerocopy::LayoutVerified::<_, [V::Elem]>::new_slice(
+                        &v_bytes.as_ref()[V::prefix_len()..],
+                    ) {
+                        Some((self.f)(&k_layout, &v_layout))
+                    } else {
+                        Some(Err(KvError::DeserError(
+                            "Bytes are invalid length or alignment.".into(),
+                        )))
+                    }
+                } else {
+                    Some(Err(KvError::DeserError(
+                        "Bytes are invalid length or alignment.".into(),
+                    )))
+                }
+            }
+            Some(Err(e)) => Some(Err(KvError::BackendError(e))),
+            None => None,
+        }
+    }
+}
+
+impl<BC, D, K, V, F> ReversableIterator for KvInnerIterRefSlice<BC, D, K, V, F>
+where
+    BC: BackendCol,
+    K: KeyZc,
+    V: ValueSliceZc,
+    F: FnMut(&K::Ref, &[V::Elem]) -> KvResult<D>,
+{
+    #[inline(always)]
+    fn reverse(self) -> Self {
+        Self {
+            backend_iter: self.backend_iter.reverse(),
+            f: self.f,
+            phantom: PhantomData,
+        }
+    }
+}
diff --git a/tools/kv_typed/src/iter/keys.rs b/tools/kv_typed/src/iter/keys.rs
new file mode 100644
index 0000000000000000000000000000000000000000..4e2cf10d28f55bfee211b7c7885b95b12889a48f
--- /dev/null
+++ b/tools/kv_typed/src/iter/keys.rs
@@ -0,0 +1,53 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed keys iterator
+
+use crate::*;
+
+#[derive(Debug)]
+pub struct KvIterKeys<C: BackendCol, KB: KeyBytes, VB: ValueBytes, BI: BackendIter<KB, VB>, K: Key>
+{
+    backend_iter: BI,
+    phantom: PhantomData<(C, KB, VB, K)>,
+}
+
+impl<C: BackendCol, KB: KeyBytes, VB: ValueBytes, BI: BackendIter<KB, VB>, K: Key> Iterator
+    for KvIterKeys<C, KB, VB, BI, K>
+{
+    type Item = KvResult<K>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self.backend_iter.next() {
+            Some(Ok((key_bytes, _value_bytes))) => match K::from_bytes(key_bytes.as_ref()) {
+                Ok(key) => Some(Ok(key)),
+                Err(e) => Some(Err(KvError::DeserError(e.into()))),
+            },
+            Some(Err(e)) => Some(Err(KvError::BackendError(e))),
+            None => None,
+        }
+    }
+}
+
+impl<C: BackendCol, KB: KeyBytes, VB: ValueBytes, BI: BackendIter<KB, VB>, K: Key>
+    KvIterKeys<C, KB, VB, BI, K>
+{
+    pub(super) fn new(backend_iter: BI) -> Self {
+        Self {
+            backend_iter,
+            phantom: PhantomData,
+        }
+    }
+}
diff --git a/tools/kv_typed/src/iter/values.rs b/tools/kv_typed/src/iter/values.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a96f608df4beca566e5ab20ddedc242766f1ef52
--- /dev/null
+++ b/tools/kv_typed/src/iter/values.rs
@@ -0,0 +1,59 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed values iterator
+
+use crate::*;
+
+#[derive(Debug)]
+pub struct KvIterValues<
+    C: BackendCol,
+    KB: KeyBytes,
+    VB: ValueBytes,
+    BI: BackendIter<KB, VB>,
+    K: Key,
+    V: Value,
+> {
+    backend_iter: BI,
+    phantom: PhantomData<(C, KB, VB, K, V)>,
+}
+
+impl<C: BackendCol, KB: KeyBytes, VB: ValueBytes, BI: BackendIter<KB, VB>, K: Key, V: Value>
+    Iterator for KvIterValues<C, KB, VB, BI, K, V>
+{
+    type Item = KvResult<V>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self.backend_iter.next() {
+            Some(Ok((_key_bytes, value_bytes))) => match V::from_bytes(value_bytes.as_ref()) {
+                Ok(value) => Some(Ok(value)),
+                Err(e) => Some(Err(KvError::DeserError(e.into()))),
+            },
+            Some(Err(e)) => Some(Err(KvError::BackendError(e))),
+            None => None,
+        }
+    }
+}
+
+impl<C: BackendCol, KB: KeyBytes, VB: ValueBytes, BI: BackendIter<KB, VB>, K: Key, V: Value>
+    KvIterValues<C, KB, VB, BI, K, V>
+{
+    pub(super) fn new(backend_iter: BI) -> Self {
+        Self {
+            backend_iter,
+            phantom: PhantomData,
+        }
+    }
+}
diff --git a/tools/kv_typed/src/key.rs b/tools/kv_typed/src/key.rs
new file mode 100644
index 0000000000000000000000000000000000000000..72d2bf3cb1387e68497c281fe95d8516254428b1
--- /dev/null
+++ b/tools/kv_typed/src/key.rs
@@ -0,0 +1,93 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed Key trait
+
+use crate::*;
+
+/// Trait to be implemented by the collection key
+
+#[cfg(not(feature = "explorer"))]
+pub trait Key:
+    'static + AsBytes + Debug + Eq + FromBytes + std::hash::Hash + Send + Sync + Sized
+{
+}
+
+#[cfg(feature = "explorer")]
+pub trait Key:
+    'static + AsBytes + Debug + Eq + ExplorableKey + FromBytes + std::hash::Hash + Send + Sync + Sized
+{
+}
+
+#[cfg(not(feature = "explorer"))]
+impl<T> Key for T where
+    T: 'static + AsBytes + Debug + Eq + FromBytes + std::hash::Hash + Send + Sync + Sized
+{
+}
+
+#[cfg(feature = "explorer")]
+impl<T> Key for T where
+    T: 'static
+        + AsBytes
+        + Debug
+        + Eq
+        + ExplorableKey
+        + FromBytes
+        + std::hash::Hash
+        + Send
+        + Sync
+        + Sized
+{
+}
+
+pub trait KeyZc: Key {
+    type Ref: Sized + zerocopy::AsBytes + zerocopy::FromBytes;
+}
+
+impl KeyZc for () {
+    type Ref = ();
+}
+
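+/// Big-endian `u32` key. Encoding keys in big-endian makes their byte-wise
+/// (lexicographic) order match their numeric order, so range iteration on
+/// byte-ordered backends behaves like iteration over integer ranges.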
+#[derive(
+    Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, zerocopy::AsBytes, zerocopy::FromBytes,
+)]
+#[repr(transparent)]
+pub struct U32BE(pub u32);
+
+impl From<&zerocopy::U32<byteorder::BigEndian>> for U32BE {
+    fn from(u32_zc: &zerocopy::U32<byteorder::BigEndian>) -> Self {
+        U32BE(u32_zc.get())
+    }
+}
+
+impl KeyZc for U32BE {
+    type Ref = zerocopy::U32<byteorder::BigEndian>;
+}
+
+#[derive(
+    Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, zerocopy::AsBytes, zerocopy::FromBytes,
+)]
+#[repr(transparent)]
+pub struct U64BE(pub u64);
+
+impl From<&zerocopy::U64<byteorder::BigEndian>> for U64BE {
+    fn from(u64_zc: &zerocopy::U64<byteorder::BigEndian>) -> Self {
+        U64BE(u64_zc.get())
+    }
+}
+
+impl KeyZc for U64BE {
+    type Ref = zerocopy::U64<byteorder::BigEndian>;
+}
diff --git a/tools/kv_typed/src/lib.rs b/tools/kv_typed/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b2487256d0b82c087994f9be3e99a1d18a5d0ebf
--- /dev/null
+++ b/tools/kv_typed/src/lib.rs
@@ -0,0 +1,153 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Strongly typed key-value storage
+
+#![allow(clippy::upper_case_acronyms, clippy::from_over_into)]
+#![deny(
+    clippy::unwrap_used,
+    missing_copy_implementations,
+    missing_debug_implementations,
+    trivial_casts,
+    trivial_numeric_casts,
+    unstable_features,
+    unused_import_braces,
+    unused_qualifications
+)]
+
+mod as_bytes;
+pub mod backend;
+mod batch;
+mod bytes;
+mod collection_inner;
+mod collection_ro;
+mod collection_rw;
+mod db_schema;
+mod error;
+mod event;
+#[cfg(feature = "explorer")]
+pub mod explorer;
+mod from_bytes;
+mod iter;
+mod key;
+mod subscription;
+mod transactional_read;
+mod transactional_write;
+mod utils;
+mod value;
+
+// Re-export dependencies
+pub use flume as channel;
+#[cfg(feature = "explorer")]
+pub use regex;
+pub use zerocopy;
+
+/// Kv Typed prelude
+pub mod prelude {
+    pub use crate::as_bytes::AsBytes;
+    #[cfg(feature = "leveldb_backend")]
+    pub use crate::backend::leveldb::{LevelDb, LevelDbConf};
+    #[cfg(feature = "lmdb_backend")]
+    pub use crate::backend::lmdb::{Lmdb, LmdbConf};
+    pub use crate::backend::memory::{Mem, MemConf};
+    pub use crate::backend::memory_singleton::{MemSingleton, MemSingletonConf};
+    #[cfg(feature = "mock")]
+    pub use crate::backend::mock::{MockBackend, MockBackendCol, MockBackendIter};
+    #[cfg(feature = "sled_backend")]
+    pub use crate::backend::sled::{Config as SledConf, Sled};
+    pub use crate::backend::{Backend, BackendCol};
+    pub use crate::batch::{Batch, BatchGet};
+    #[cfg(feature = "mock")]
+    pub use crate::collection_ro::MockColRo;
+    pub use crate::collection_ro::{
+        ColRo, DbCollectionRo, DbCollectionRoGetRef, DbCollectionRoGetRefSlice,
+        DbCollectionRoIterRefSlice,
+    };
+    pub use crate::collection_rw::{ColRw, DbCollectionRw};
+    pub use crate::error::{DynErr, KvError, KvResult};
+    pub use crate::event::{EventTrait, Events};
+    #[cfg(feature = "explorer")]
+    pub use crate::explorer::{
+        ExplorableKey, ExplorableValue, ExplorerActionErr, FromExplorerKeyErr, FromExplorerValueErr,
+    };
+    pub use crate::from_bytes::{FromBytes, LayoutVerifiedErr};
+    pub use crate::iter::{
+        keys::KvIterKeys, values::KvIterValues, EntryIter, KvIter, KvIterRefSlice, ResultIter,
+    };
+    pub use crate::key::{Key, KeyZc, U32BE, U64BE};
+    pub use crate::subscription::{NewSubscribers, Subscriber, Subscribers};
+    pub use crate::transactional_read::{TransactionalRead, TxColRo};
+    pub use crate::transactional_write::{DbTxCollectionRw, TransactionalWrite, TxColRw};
+    pub use crate::utils::arc::Arc;
+    pub use crate::value::{Value, ValueSliceZc, ValueZc};
+    pub use crate::OwnedOrRef;
+}
+
+// Internal crate imports
+pub(crate) use crate::backend::{BackendBatch, BackendIter};
+pub(crate) use crate::bytes::{CowKB, CowVB, KeyBytes, ValueBytes};
+pub(crate) use crate::collection_inner::ColInner;
+pub(crate) use crate::error::BackendResult;
+#[cfg(feature = "explorer")]
+pub(crate) use crate::explorer::{ExplorableKey, ExplorableValue};
+pub(crate) use crate::iter::{KvInnerIterRefSlice, RangeBytes, ReversableIterator};
+pub(crate) use crate::prelude::*;
+pub(crate) use crate::subscription::{ColSubscribers, SubscriptionsSender};
+pub(crate) use crate::transactional_write::tx_iter::BackendTxIter;
+pub(crate) use crate::utils::arc::Arc;
+pub(crate) use crate::utils::ivec::IVec;
+use flume::{unbounded, Receiver, Sender, TrySendError};
+pub(crate) use smallvec::SmallVec;
+pub(crate) use std::{
+    collections::{BTreeSet, HashSet},
+    convert::TryInto,
+    error::Error,
+    fmt::{Debug, Display},
+    marker::PhantomData,
+    ops::{Bound, RangeBounds},
+    str::FromStr,
+};
+pub(crate) use thiserror::Error;
+
+pub enum OwnedOrRef<'a, T> {
+    Owned(T),
+    Borrow(&'a T),
+}
+impl<'a, T> AsRef<T> for OwnedOrRef<'a, T> {
+    fn as_ref(&self) -> &T {
+        match self {
+            Self::Owned(t) => t,
+            Self::Borrow(t) => *t,
+        }
+    }
+}
+impl<'a, T: Debug> Debug for OwnedOrRef<'a, T> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::Owned(t) => write!(f, "OwnedOrRef::Owned({:?})", t),
+            Self::Borrow(t) => write!(f, "OwnedOrRef::Borrow({:?})", t),
+        }
+    }
+}
+impl<'a, T> From<&'a T> for OwnedOrRef<'a, T> {
+    fn from(borrow: &'a T) -> Self {
+        Self::Borrow(borrow)
+    }
+}
+impl<T> From<T> for OwnedOrRef<'_, T> {
+    fn from(owned: T) -> Self {
+        Self::Owned(owned)
+    }
+}
diff --git a/tools/kv_typed/src/subscription.rs b/tools/kv_typed/src/subscription.rs
new file mode 100644
index 0000000000000000000000000000000000000000..049aa446ae9238c255cc248bdf144a0a41d9b776
--- /dev/null
+++ b/tools/kv_typed/src/subscription.rs
@@ -0,0 +1,102 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed subscription
+
+use crate::*;
+
+/// Subscriber
+pub type Subscriber<E> = Sender<Arc<Events<E>>>;
+/// Subscriptions sender
+pub(crate) type SubscriptionsSender<E> = Sender<Subscriber<E>>;
+/// Subscribers
+pub type Subscribers<E> = std::collections::BTreeMap<usize, Subscriber<E>>;
+/// New subscribers
+pub type NewSubscribers<E> = SmallVec<[Subscriber<E>; 4]>;
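+
+// A sketch of how a consumer subscribes to a column's events, taken from the
+// integration tests (`db` and `col1` come from a `db_schema!`-generated
+// database and are illustrative, not part of this module):
+//
+//     let (sender, receiver) = kv_typed::channel::unbounded();
+//     db.col1().subscribe(sender)?;
+//     db.col1_write().upsert(3, "toto".to_owned())?;
+//     let events = receiver.recv().expect("writer dropped"); // Arc<Events<Col1Event>>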
+
+#[derive(Debug)]
+#[doc(hidden)]
+pub struct ColSubscribers<E: EventTrait> {
+    subscription_sender: SubscriptionsSender<E>,
+    subscription_receiver: Receiver<Subscriber<E>>,
+    subscribers: Subscribers<E>,
+    subscriber_index: usize,
+}
+
+impl<E: EventTrait> Default for ColSubscribers<E> {
+    fn default() -> Self {
+        let (subscription_sender, subscription_receiver) = unbounded();
+        ColSubscribers {
+            subscription_sender,
+            subscription_receiver,
+            subscribers: std::collections::BTreeMap::new(),
+            subscriber_index: 0,
+        }
+    }
+}
+
+impl<E: EventTrait> ColSubscribers<E> {
+    pub(crate) fn get_subscription_sender(&self) -> Sender<Subscriber<E>> {
+        self.subscription_sender.clone()
+    }
+    #[inline(always)]
+    pub(crate) fn get_new_subscribers(&self) -> NewSubscribers<E> {
+        if !self.subscription_receiver.is_empty() {
+            let mut new_subscribers = SmallVec::new();
+            while let Ok(subscriber) = self.subscription_receiver.try_recv() {
+                new_subscribers.push(subscriber)
+            }
+            new_subscribers
+        } else {
+            SmallVec::new()
+        }
+    }
+    pub(crate) fn notify_subscribers(&self, events: Arc<Events<E>>) -> Vec<usize> {
+        let mut died_subscribers = Vec::with_capacity(self.subscribers.len());
+        let mut unsent_events_opt = None;
+        for (id, subscriber) in &self.subscribers {
+            if let Err(e) = subscriber.try_send(
+                unsent_events_opt
+                    .take()
+                    .unwrap_or_else(|| Arc::clone(&events)),
+            ) {
+                match e {
+                    TrySendError::Disconnected(events_) => {
+                        unsent_events_opt = Some(events_);
+                        died_subscribers.push(*id);
+                    }
+                    TrySendError::Full(events_) => {
+                        unsent_events_opt = Some(events_);
+                    }
+                }
+            }
+        }
+        died_subscribers
+    }
+    #[inline(always)]
+    pub(crate) fn add_new_subscribers(&mut self) {
+        for new_subscriber in self.get_new_subscribers() {
+            self.subscribers
+                .insert(self.subscriber_index, new_subscriber);
+            self.subscriber_index += 1;
+        }
+    }
+    #[inline(always)]
+    pub(crate) fn prune_subscribers(&mut self, died_subscribers: Vec<usize>) {
+        for died_subscriber in died_subscribers {
+            self.subscribers.remove(&died_subscriber);
+        }
+    }
+}
diff --git a/tools/kv_typed/src/transactional_read.rs b/tools/kv_typed/src/transactional_read.rs
new file mode 100644
index 0000000000000000000000000000000000000000..647861f9b3c8d7c6518e3063e8fdcaf4639b70f8
--- /dev/null
+++ b/tools/kv_typed/src/transactional_read.rs
@@ -0,0 +1,211 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed transactional read
+
+use crate::*;
+use parking_lot::RwLockReadGuard as ReadGuard;
+
+type TxColRoReader<'r, BC, E> = parking_lot::RwLockReadGuard<'r, ColInner<BC, E>>;
+
+pub struct TxColRo<'tx, BC: BackendCol, E: EventTrait> {
+    col_reader: ReadGuard<'tx, ColInner<BC, E>>,
+}
+impl<'tx, BC: BackendCol, E: EventTrait> Debug for TxColRo<'tx, BC, E> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("TxColRo")
+            .field("col_reader", &format!("{:?}", self.col_reader))
+            .finish()
+    }
+}
+impl<'tx, BC: BackendCol, E: EventTrait> TxColRo<'tx, BC, E> {
+    #[inline(always)]
+    fn new(col_reader: ReadGuard<'tx, ColInner<BC, E>>) -> Self {
+        TxColRo { col_reader }
+    }
+    #[inline(always)]
+    pub fn count(&self) -> KvResult<usize> {
+        self.col_reader.backend_col.count()
+    }
+    #[inline(always)]
+    pub fn get(&self, k: &E::K) -> KvResult<Option<E::V>> {
+        self.col_reader.backend_col.get(k)
+    }
+    #[allow(clippy::type_complexity)]
+    #[inline(always)]
+    /// Don't worry about the complex iterator type; use it like an `impl Iterator<Item=KvResult<(K, V)>>`.
+    pub fn iter<D, R, F>(&self, range: R, f: F) -> D
+    where
+        D: Send + Sync,
+        R: 'static + RangeBounds<E::K>,
+        F: FnOnce(KvIter<BC, BC::KeyBytes, BC::ValueBytes, BC::Iter, E::K, E::V>) -> D,
+    {
+        let range_bytes = crate::iter::convert_range::<E::K, R>(range);
+        let backend_iter = self.col_reader.backend_col.iter::<E::K, E::V>(range_bytes);
+        f(KvIter::new(backend_iter))
+    }
+    #[allow(clippy::type_complexity)]
+    #[inline(always)]
+    /// Don't worry about the complex iterator type; use it like an `impl Iterator<Item=KvResult<(K, V)>>`.
+    pub fn iter_rev<D, R, F>(&self, range: R, f: F) -> D
+    where
+        D: Send + Sync,
+        R: 'static + RangeBounds<E::K>,
+        F: FnOnce(KvIter<BC, BC::KeyBytes, BC::ValueBytes, BC::Iter, E::K, E::V>) -> D,
+    {
+        let range_bytes = crate::iter::convert_range::<E::K, R>(range);
+        let backend_iter = self
+            .col_reader
+            .backend_col
+            .iter::<E::K, E::V>(range_bytes)
+            .reverse();
+        f(KvIter::new(backend_iter))
+    }
+}
+impl<'tx, V: ValueZc, BC: BackendCol, E: EventTrait<V = V>> TxColRo<'tx, BC, E> {
+    pub fn get_ref<D, F: Fn(&V::Ref) -> KvResult<D>>(&self, k: &E::K, f: F) -> KvResult<Option<D>> {
+        self.col_reader.backend_col.get_ref::<E::K, V, D, F>(k, f)
+    }
+}
+impl<'tx, K: KeyZc, V: ValueSliceZc, BC: BackendCol, E: EventTrait<K = K, V = V>>
+    TxColRo<'tx, BC, E>
+{
+    pub fn iter_ref_slice<D, R, F>(
+        &self,
+        range: R,
+        f: F,
+    ) -> KvIterRefSlice<BC, D, K, V, F, TxColRoReader<BC, E>>
+    where
+        K: KeyZc,
+        V: ValueSliceZc,
+        R: 'static + RangeBounds<K>,
+        F: FnMut(&K::Ref, &[V::Elem]) -> KvResult<D>,
+    {
+        let range: RangeBytes = crate::iter::convert_range::<K, R>(range);
+        let inner_iter = self
+            .col_reader
+            .backend_col
+            .iter_ref_slice::<D, K, V, F>(range, f);
+
+        KvIterRefSlice {
+            inner: inner_iter,
+            reader: OwnedOrRef::Borrow(&self.col_reader),
+        }
+    }
+    pub fn iter_ref_slice_rev<D, R, F>(
+        &self,
+        range: R,
+        f: F,
+    ) -> KvIterRefSlice<BC, D, K, V, F, TxColRoReader<BC, E>>
+    where
+        K: KeyZc,
+        V: ValueSliceZc,
+        R: 'static + RangeBounds<K>,
+        F: FnMut(&K::Ref, &[V::Elem]) -> KvResult<D>,
+    {
+        let range: RangeBytes = crate::iter::convert_range::<K, R>(range);
+        let inner_iter = self
+            .col_reader
+            .backend_col
+            .iter_ref_slice::<D, K, V, F>(range, f)
+            .reverse();
+
+        KvIterRefSlice {
+            inner: inner_iter,
+            reader: OwnedOrRef::Borrow(&self.col_reader),
+        }
+    }
+}
+
+impl<'tx, V: ValueSliceZc, BC: BackendCol, E: EventTrait<V = V>> TxColRo<'tx, BC, E> {
+    pub fn get_ref_slice<D, F: Fn(&[V::Elem]) -> KvResult<D>>(
+        &self,
+        k: &E::K,
+        f: F,
+    ) -> KvResult<Option<D>> {
+        self.col_reader
+            .backend_col
+            .get_ref_slice::<E::K, V, D, F>(k, f)
+    }
+}
+
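+/// Read one or more read-only columns under a consistent set of read locks.
+///
+/// Implemented for `&ColRo<..>` and for tuples of two to ten of them. A
+/// sketch based on the integration tests (the column accessors come from a
+/// hypothetical `db_schema!`-generated database):
+///
+/// ```ignore
+/// (db.col3(), db.col4()).read(|(c3, c4)| {
+///     let _v3 = c3.get(&U32BE(4))?;
+///     let _v4 = c4.get(&U64BE(4))?;
+///     Ok(())
+/// })?;
+/// ```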
+pub trait TransactionalRead<'tx, BC: BackendCol> {
+    type TxCols;
+
+    fn read<D, F: Fn(Self::TxCols) -> KvResult<D>>(&'tx self, f: F) -> KvResult<D>;
+
+    fn try_read<D, F: Fn(Self::TxCols) -> KvResult<D>>(&'tx self, f: F) -> Result<KvResult<D>, F>;
+}
+
+impl<'tx, BC: BackendCol, E: EventTrait> TransactionalRead<'tx, BC> for &'tx ColRo<BC, E> {
+    type TxCols = TxColRo<'tx, BC, E>;
+
+    fn read<D, F: Fn(Self::TxCols) -> KvResult<D>>(&'tx self, f: F) -> KvResult<D> {
+        let read_guard_0 = self.inner.read();
+
+        f(TxColRo::new(read_guard_0))
+    }
+
+    fn try_read<D, F: Fn(Self::TxCols) -> KvResult<D>>(&'tx self, f: F) -> Result<KvResult<D>, F> {
+        if let Some(read_guard_0) = self.inner.try_read() {
+            Ok(f(TxColRo::new(read_guard_0)))
+        } else {
+            Err(f)
+        }
+    }
+}
+
+macro_rules! impl_transactional_read {
+    ($($i:literal),*) => {
+        paste::paste! {
+            impl<'tx, BC: BackendCol $( ,[<E $i>]: EventTrait)*> TransactionalRead<'tx, BC>
+                for ($(&'tx ColRo<BC, [<E $i>]>, )*)
+            {
+                type TxCols = ($(TxColRo<'tx, BC,  [<E $i>]>, )*);
+
+                fn read<D, F: Fn(Self::TxCols) -> KvResult<D>>(
+                    &'tx self,
+                    f: F,
+                ) -> KvResult<D> {
+                    $(let [<read_guard_ $i>] = self.$i.inner.read();)*
+
+                    f(($(TxColRo::new([<read_guard_ $i>]), )*))
+                }
+
+                fn try_read<D, F: Fn(Self::TxCols) -> KvResult<D>>(
+                    &'tx self,
+                    f: F,
+                ) -> Result<KvResult<D>, F> {
+                    $(let [<read_guard_opt_ $i>] = self.$i.inner.try_read();)*
+
+                    if $([<read_guard_opt_ $i>].is_none() || )* false {
+                        Err(f)
+                    } else {
+                        Ok(f(($(TxColRo::new([<read_guard_opt_ $i>].expect("unreachable")), )*)))
+                    }
+                }
+            }
+        }
+    };
+}
+impl_transactional_read!(0, 1);
+impl_transactional_read!(0, 1, 2);
+impl_transactional_read!(0, 1, 2, 3);
+impl_transactional_read!(0, 1, 2, 3, 4);
+impl_transactional_read!(0, 1, 2, 3, 4, 5);
+impl_transactional_read!(0, 1, 2, 3, 4, 5, 6);
+impl_transactional_read!(0, 1, 2, 3, 4, 5, 6, 7);
+impl_transactional_read!(0, 1, 2, 3, 4, 5, 6, 7, 8);
+impl_transactional_read!(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
diff --git a/tools/kv_typed/src/transactional_write.rs b/tools/kv_typed/src/transactional_write.rs
new file mode 100644
index 0000000000000000000000000000000000000000..2595e630250a999f61ca548bda105b63741b3513
--- /dev/null
+++ b/tools/kv_typed/src/transactional_write.rs
@@ -0,0 +1,237 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed transactional write
+
+pub(crate) mod tx_iter;
+
+use crate::*;
+use parking_lot::RwLockUpgradableReadGuard as UpgradableReadGuard;
+
+pub struct TxColRw<'tx, BC: BackendCol, E: EventTrait> {
+    batch: &'static mut Batch<BC, ColRw<BC, E>>,
+    col_reader: &'tx UpgradableReadGuard<'tx, ColInner<BC, E>>,
+}
+
+impl<'tx, BC: BackendCol, E: EventTrait> TxColRw<'tx, BC, E> {
+    #[doc(hidden)]
+    /// For internal use only. MUST NOT be called directly.
+    pub fn new(
+        batch: &mut Batch<BC, ColRw<BC, E>>,
+        upgradable_guard: &UpgradableReadGuard<ColInner<BC, E>>,
+    ) -> Self {
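+        // The transmutes below only extend borrow lifetimes (the batch to
+        // `'static`, the guard to `'tx`); the caller must keep both alive for
+        // the whole transaction, hence the warning above.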
+        TxColRw {
+            batch: unsafe { std::mem::transmute(batch) },
+            col_reader: unsafe { std::mem::transmute(upgradable_guard) },
+        }
+    }
+}
+
+impl<'tx, BC: BackendCol, E: EventTrait> Debug for TxColRw<'tx, BC, E> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("TxColRw")
+            .field("batch", &format!("{:?}", self.batch))
+            .field("col_reader", &format!("{:?}", self.col_reader))
+            .finish()
+    }
+}
+
+impl<'tx, V: ValueZc, BC: BackendCol, E: EventTrait<V = V>> TxColRw<'tx, BC, E> {
+    pub fn get_ref<D, F: Fn(&V::Ref) -> KvResult<D>>(&self, k: &E::K, f: F) -> KvResult<Option<D>> {
+        self.col_reader.backend_col.get_ref::<E::K, V, D, F>(k, f)
+    }
+}
+impl<'tx, V: ValueSliceZc, BC: BackendCol, E: EventTrait<V = V>> TxColRw<'tx, BC, E> {
+    pub fn get_ref_slice<D, F: Fn(&[V::Elem]) -> KvResult<D>>(
+        &self,
+        k: &E::K,
+        f: F,
+    ) -> KvResult<Option<D>> {
+        self.col_reader
+            .backend_col
+            .get_ref_slice::<E::K, V, D, F>(k, f)
+    }
+}
+
+impl<'tx, BC: BackendCol, E: EventTrait> TxColRw<'tx, BC, E> {
+    #[inline(always)]
+    pub fn count(&self) -> KvResult<usize> {
+        self.col_reader.backend_col.count()
+    }
+    #[inline(always)]
+    pub fn get(&self, k: &E::K) -> KvResult<Option<E::V>> {
+        match self.batch.get(k) {
+            batch::BatchGet::None => self.col_reader.backend_col.get(k),
+            batch::BatchGet::Deleted => Ok(None),
+            batch::BatchGet::Updated(v) => Ok(Some(v.as_bytes(|v_bytes| {
+                E::V::from_bytes(v_bytes).map_err(|e| KvError::DeserError(e.into()))
+            })?)),
+        }
+    }
+    #[allow(clippy::type_complexity)]
+    #[inline(always)]
+    /// Don't worry about the complex iterator type; use it like an `impl Iterator<Item=KvResult<(K, V)>>`.
+    pub fn iter<D, R, F>(&'tx self, range: R, f: F) -> D
+    where
+        D: Send + Sync,
+        R: 'static + RangeBounds<E::K>,
+        F: FnOnce(
+            KvIter<
+                BC,
+                CowKB<'tx, BC::KeyBytes>,
+                CowVB<'tx, BC::ValueBytes>,
+                BackendTxIter<BC>,
+                E::K,
+                E::V,
+            >,
+        ) -> D,
+    {
+        let range_bytes = crate::iter::convert_range::<E::K, R>(range);
+        let backend_iter = self.col_reader.backend_col.iter::<E::K, E::V>(range_bytes);
+        f(KvIter::new(BackendTxIter::new(
+            backend_iter,
+            &self.batch.tree,
+        )))
+    }
+    #[allow(clippy::type_complexity)]
+    #[inline(always)]
+    /// Don't worry about the complex iterator type; use it like an `impl Iterator<Item=KvResult<(K, V)>>`.
+    pub fn iter_rev<D, R, F>(&'tx self, range: R, f: F) -> D
+    where
+        D: Send + Sync,
+        R: 'static + RangeBounds<E::K>,
+        F: FnOnce(
+            KvIter<
+                BC,
+                CowKB<'tx, BC::KeyBytes>,
+                CowVB<'tx, BC::ValueBytes>,
+                BackendTxIter<BC>,
+                E::K,
+                E::V,
+            >,
+        ) -> D,
+    {
+        let range_bytes = crate::iter::convert_range::<E::K, R>(range);
+        let backend_iter = self.col_reader.backend_col.iter::<E::K, E::V>(range_bytes);
+        f(KvIter::new(
+            BackendTxIter::new(backend_iter, &self.batch.tree).reverse(),
+        ))
+    }
+}
+
+pub trait DbTxCollectionRw {
+    type K: Key;
+    type V: Value;
+    type Event: EventTrait<K = Self::K, V = Self::V>;
+
+    fn remove(&mut self, k: Self::K);
+    fn upsert(&mut self, k: Self::K, v: Self::V);
+}
+
+impl<'tx, BC: BackendCol, E: EventTrait> DbTxCollectionRw for TxColRw<'tx, BC, E> {
+    type K = E::K;
+    type V = E::V;
+    type Event = E;
+
+    #[inline(always)]
+    fn remove(&mut self, k: Self::K) {
+        self.batch.remove(k)
+    }
+    #[inline(always)]
+    fn upsert(&mut self, k: Self::K, v: Self::V) {
+        self.batch.upsert(k, v)
+    }
+}
+
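+/// Write one or more columns in a single transaction: changes are buffered in
+/// per-column `Batch`es and committed under exclusive locks only if the
+/// closure returns `Ok`.
+///
+/// Implemented for `&ColRw<..>` and for tuples of two to ten of them. A
+/// sketch based on the integration tests (the column accessors come from a
+/// hypothetical `db_schema!`-generated database):
+///
+/// ```ignore
+/// (db.col3_write(), db.col4_write()).write(|(mut c3, mut c4)| {
+///     c3.upsert(U32BE(42), vec![5, 4, 6]);
+///     c4.remove(U64BE(4));
+///     Ok(())
+/// })?;
+/// ```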
+pub trait TransactionalWrite<'tx, BC: BackendCol> {
+    type TxCols;
+
+    fn write<D, F: FnOnce(Self::TxCols) -> KvResult<D>>(&'tx self, f: F) -> KvResult<D>;
+}
+
+impl<'tx, BC: BackendCol, E: EventTrait> TransactionalWrite<'tx, BC> for &'tx ColRw<BC, E> {
+    type TxCols = TxColRw<'tx, BC, E>;
+
+    fn write<D, F: FnOnce(Self::TxCols) -> KvResult<D>>(&'tx self, f: F) -> KvResult<D> {
+        let upgradable_guard = self.inner.inner.upgradable_read();
+
+        let mut batch = Batch::<BC, ColRw<BC, E>>::default();
+
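+        // Same lifetime-extending transmutes as in `TxColRw::new`: `batch` and
+        // `upgradable_guard` outlive `tx_col`, which is consumed when `f` returns.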
+        let tx_col = TxColRw {
+            batch: unsafe { std::mem::transmute(&mut batch) },
+            col_reader: unsafe { std::mem::transmute(&upgradable_guard) },
+        };
+        let data = f(tx_col)?;
+
+        // Prepare commit
+        let (backend_batch, events) = batch.into_backend_batch_and_events();
+
+        // Acquire exclusive lock
+        let mut write_guard = UpgradableReadGuard::upgrade(upgradable_guard);
+
+        // Commit
+        self.write_backend_batch(backend_batch, events, &mut write_guard)?;
+
+        Ok(data)
+    }
+}
+
+macro_rules! impl_transactional_write {
+    ($($i:literal),*) => {
+        paste::paste! {
+            impl<'tx, BC: BackendCol $( ,[<E $i>]: EventTrait)*> TransactionalWrite<'tx, BC>
+                for ($(&'tx ColRw<BC, [<E $i>]>, )*)
+            {
+                type TxCols = ($(TxColRw<'tx, BC,  [<E $i>]>, )*);
+
+                fn write<D, F: FnOnce(Self::TxCols) -> KvResult<D>>(
+                    &'tx self,
+                    f: F,
+                ) -> KvResult<D> {
+                    $(let [<upgradable_guard_ $i>] = self.$i.inner.inner.upgradable_read();)*
+
+                    $(let mut [<batch_ $i>] = Batch::<BC, ColRw<BC, [<E $i>]>>::default();)*
+
+                    $(let [<tx_col $i>] = TxColRw {
+                        batch: unsafe { std::mem::transmute(&mut [<batch_ $i>]) },
+                        col_reader: unsafe { std::mem::transmute(&[<upgradable_guard_ $i>]) },
+                    };)*
+
+                    let data = f(($([<tx_col $i>], )*))?;
+
+                    // Prepare commit
+                    $(let ([<backend_batch_ $i>], [<events_ $i>]) = [<batch_ $i>].into_backend_batch_and_events();)*
+
+                    // Acquire exclusive lock
+                    $(let mut [<write_guard_ $i>] = UpgradableReadGuard::upgrade([<upgradable_guard_ $i>]);)*
+
+                    // Commit
+                    $(self.$i.write_backend_batch([<backend_batch_ $i>], [<events_ $i>], &mut [<write_guard_ $i>])?;)*
+
+                    Ok(data)
+                }
+            }
+        }
+    };
+}
+impl_transactional_write!(0, 1);
+impl_transactional_write!(0, 1, 2);
+impl_transactional_write!(0, 1, 2, 3);
+impl_transactional_write!(0, 1, 2, 3, 4);
+impl_transactional_write!(0, 1, 2, 3, 4, 5);
+impl_transactional_write!(0, 1, 2, 3, 4, 5, 6);
+impl_transactional_write!(0, 1, 2, 3, 4, 5, 6, 7);
+impl_transactional_write!(0, 1, 2, 3, 4, 5, 6, 7, 8);
+impl_transactional_write!(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
diff --git a/tools/kv_typed/src/transactional_write/tx_iter.rs b/tools/kv_typed/src/transactional_write/tx_iter.rs
new file mode 100644
index 0000000000000000000000000000000000000000..fe1c28bcf79c30b5a85fdc86baf551313a203596
--- /dev/null
+++ b/tools/kv_typed/src/transactional_write/tx_iter.rs
@@ -0,0 +1,157 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed transactional iterator
+
+use crate::*;
+use std::collections::BTreeMap;
+
+#[doc(hidden)]
+#[derive(Debug)]
+pub struct BackendTxIter<'b, BC: BackendCol> {
+    batch_end_reached: bool,
+    batch_iter: std::collections::btree_map::Iter<'b, IVec, Option<IVec>>,
+    batch_tree_ref: &'b BTreeMap<IVec, Option<IVec>>,
+    backend_iter: BC::Iter,
+    db_end_reached: bool,
+    next_batch_entry_opt: Option<(&'b IVec, &'b Option<IVec>)>,
+    next_db_entry_opt: Option<(BC::KeyBytes, BC::ValueBytes)>,
+    reverted: bool,
+}
+
+impl<'b, BC: BackendCol> BackendTxIter<'b, BC> {
+    pub(crate) fn new(
+        backend_iter: BC::Iter,
+        batch_tree: &'b BTreeMap<IVec, Option<IVec>>,
+    ) -> Self {
+        Self {
+            batch_end_reached: false,
+            batch_iter: batch_tree.iter(),
+            batch_tree_ref: batch_tree,
+            backend_iter,
+            db_end_reached: false,
+            next_batch_entry_opt: None,
+            next_db_entry_opt: None,
+            reverted: false,
+        }
+    }
+}
+
+impl<'b, BC: BackendCol> BackendTxIter<'b, BC> {
+    fn get_next_db_item(&mut self) -> Option<BackendResult<BC>> {
+        match self.backend_iter.next() {
+            Some(Ok(entry)) => {
+                if self.batch_tree_ref.contains_key(entry.0.as_ref()) {
+                    self.get_next_db_item()
+                } else {
+                    Some(Ok(entry))
+                }
+            }
+            o => o,
+        }
+    }
+}
+
+#[allow(type_alias_bounds)]
+type CowBytesEntry<'a, BC: BackendCol> = (CowKB<'a, BC::KeyBytes>, CowVB<'a, BC::ValueBytes>);
+
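+// `next` merges two sorted streams: the pending batch entries and the backend
+// entries. Backend keys that also exist in the batch are skipped by
+// `get_next_db_item` (the batch shadows the backend), batch entries whose
+// value is `None` are pending deletions and are skipped, and `reverted`
+// selects which end of each stream is consumed.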
+impl<'b, BC: BackendCol> Iterator for BackendTxIter<'b, BC> {
+    type Item = Result<CowBytesEntry<'b, BC>, DynErr>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.next_batch_entry_opt.is_none() {
+            self.next_batch_entry_opt = if self.reverted {
+                self.batch_iter.next_back()
+            } else {
+                self.batch_iter.next()
+            };
+        }
+        if self.next_batch_entry_opt.is_none() {
+            self.batch_end_reached = true;
+        }
+        if self.next_db_entry_opt.is_none() {
+            self.next_db_entry_opt = match self.get_next_db_item() {
+                Some(Ok(entry)) => Some(entry),
+                Some(Err(e)) => return Some(Err(e)),
+                None => {
+                    self.db_end_reached = true;
+                    None
+                }
+            };
+        }
+
+        if self.batch_end_reached {
+            if self.db_end_reached {
+                None
+            } else {
+                // Return db item
+                Some(Ok(self
+                    .next_db_entry_opt
+                    .take()
+                    .map(|(k, v)| (CowKB::O(k), CowVB::O(v)))
+                    .expect("unreachable")))
+            }
+        } else if self.db_end_reached {
+            // Return batch item
+            if let Some((k, v_opt)) = self.next_batch_entry_opt.take() {
+                if let Some(v) = v_opt {
+                    Some(Ok((CowKB::B(k.as_ref()), CowVB::B(v.as_ref()))))
+                } else {
+                    self.next()
+                }
+            } else {
+                // batch_end_reached = false
+                unreachable!()
+            }
+        } else if let Some((k_batch, v_batch_opt)) = self.next_batch_entry_opt.take() {
+            if let Some((k_db, v_db)) = self.next_db_entry_opt.take() {
+                if (!self.reverted && k_batch.as_ref() <= k_db.as_ref())
+                    || (self.reverted && k_batch.as_ref() >= k_db.as_ref())
+                {
+                    self.next_db_entry_opt = Some((k_db, v_db));
+                    // Return batch item
+                    if let Some(v_batch) = v_batch_opt {
+                        Some(Ok((CowKB::B(k_batch.as_ref()), CowVB::B(v_batch.as_ref()))))
+                    } else {
+                        self.next()
+                    }
+                } else {
+                    self.next_batch_entry_opt = Some((k_batch, v_batch_opt));
+                    // Return db item
+                    Some(Ok((CowKB::O(k_db), CowVB::O(v_db))))
+                }
+            } else {
+                // db_end_reached = false
+                unreachable!()
+            }
+        } else {
+            // batch_end_reached = false
+            unreachable!()
+        }
+    }
+}
+
+impl<'b, BC: BackendCol> ReversableIterator for BackendTxIter<'b, BC> {
+    fn reverse(mut self) -> Self {
+        self.backend_iter = self.backend_iter.reverse();
+        self.reverted = true;
+        self
+    }
+}
+
+impl<'b, BC: BackendCol> BackendIter<CowKB<'b, BC::KeyBytes>, CowVB<'b, BC::ValueBytes>>
+    for BackendTxIter<'b, BC>
+{
+}
diff --git a/tools/kv_typed/src/utils.rs b/tools/kv_typed/src/utils.rs
new file mode 100644
index 0000000000000000000000000000000000000000..68bd74c49c3a93239ea3bb150c947044037b820e
--- /dev/null
+++ b/tools/kv_typed/src/utils.rs
@@ -0,0 +1,24 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed utils
+
+pub mod arc;
+#[cfg(not(feature = "sled_backend"))]
+pub mod ivec;
+#[cfg(feature = "sled_backend")]
+pub mod ivec {
+    pub use sled::IVec;
+}
diff --git a/tools/kv_typed/src/utils/arc.rs b/tools/kv_typed/src/utils/arc.rs
new file mode 100644
index 0000000000000000000000000000000000000000..74a7b5856c0df43b46174103274e042feabb307e
--- /dev/null
+++ b/tools/kv_typed/src/utils/arc.rs
@@ -0,0 +1,267 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV typed Arc
+
+#![allow(clippy::unwrap_used, dead_code, unsafe_code)]
+// We use this custom `Arc` because we never use the weak count of the std
+// `Arc`, but we use a LOT of `Arc`s, so the extra 8 bytes for the weak count
+// turn into a huge overhead.
+use std::{
+    alloc::{alloc, dealloc, Layout},
+    convert::TryFrom,
+    fmt::{self, Debug},
+    mem,
+    ops::Deref,
+    ptr,
+    sync::atomic::{AtomicUsize, Ordering},
+};
+
+// we make this repr(C) because we do a raw
+// write to the beginning where we expect
+// the rc to be.
+#[repr(C)]
+struct ArcInner<T: ?Sized> {
+    rc: AtomicUsize,
+    inner: T,
+}
+
+pub struct Arc<T: ?Sized> {
+    ptr: *mut ArcInner<T>,
+}
+
+unsafe impl<T: Send + Sync + ?Sized> Send for Arc<T> {}
+unsafe impl<T: Send + Sync + ?Sized> Sync for Arc<T> {}
+
+impl<T: Debug + ?Sized> Debug for Arc<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+        Debug::fmt(&**self, f)
+    }
+}
+
+impl<T> Arc<T> {
+    pub fn new(inner: T) -> Arc<T> {
+        let bx = Box::new(ArcInner {
+            inner,
+            rc: AtomicUsize::new(1),
+        });
+        let ptr = Box::into_raw(bx);
+        Arc { ptr }
+    }
+
+    // See std::sync::arc::Arc::copy_from_slice,
+    // "Unsafe because the caller must either take ownership or bind `T: Copy`"
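+    //
+    // Allocation layout: an `AtomicUsize` refcount slot widened to
+    // `max(align_of::<T>(), align_of::<AtomicUsize>())`, then the `T` elements,
+    // with the total size rounded up to that alignment. E.g. on a 64-bit
+    // target, a `&[u8]` of length 3 gives align = 8, rc_width = 8,
+    // data_width = 3, and a padded size of 16 bytes.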
+    unsafe fn copy_from_slice(s: &[T]) -> Arc<[T]> {
+        let align = std::cmp::max(mem::align_of::<T>(), mem::align_of::<AtomicUsize>());
+
+        let rc_width = std::cmp::max(align, mem::size_of::<AtomicUsize>());
+        let data_width = mem::size_of::<T>().checked_mul(s.len()).unwrap();
+
+        let size_unpadded = rc_width.checked_add(data_width).unwrap();
+        // Pad size out to alignment
+        let size_padded = (size_unpadded + align - 1) & !(align - 1);
+
+        let layout = Layout::from_size_align(size_padded, align).unwrap();
+
+        let ptr = alloc(layout);
+
+        assert!(!ptr.is_null(), "failed to allocate Arc");
+        #[allow(clippy::cast_ptr_alignment)]
+        ptr::write(ptr as _, AtomicUsize::new(1));
+
+        let data_ptr = ptr.add(rc_width);
+        ptr::copy_nonoverlapping(s.as_ptr(), data_ptr as _, s.len());
+
+        let fat_ptr: *const ArcInner<[T]> = Arc::fatten(ptr, s.len());
+
+        Arc {
+            ptr: fat_ptr as *mut _,
+        }
+    }
+
+    /// <https://users.rust-lang.org/t/construct-fat-pointer-to-struct/29198/9>
+    #[allow(trivial_casts)]
+    fn fatten(data: *const u8, len: usize) -> *const ArcInner<[T]> {
+        // Requirements of slice::from_raw_parts.
+        assert!(!data.is_null());
+        assert!(isize::try_from(len).is_ok());
+
+        let slice = unsafe { core::slice::from_raw_parts(data as *const (), len) };
+        slice as *const [()] as *const _
+    }
+
+    pub fn into_raw(arc: Arc<T>) -> *const T {
+        let ptr = unsafe { &(*arc.ptr).inner };
+        #[allow(clippy::mem_forget)]
+        mem::forget(arc);
+        ptr
+    }
+
+    #[allow(clippy::missing_safety_doc)]
+    pub unsafe fn from_raw(ptr: *const T) -> Arc<T> {
+        let align = std::cmp::max(mem::align_of::<T>(), mem::align_of::<AtomicUsize>());
+
+        let rc_width = std::cmp::max(align, mem::size_of::<AtomicUsize>());
+
+        let sub_ptr = (ptr as *const u8).sub(rc_width) as *mut ArcInner<T>;
+
+        Arc { ptr: sub_ptr }
+    }
+}
+
+impl<T: ?Sized> Arc<T> {
+    pub fn strong_count(arc: &Arc<T>) -> usize {
+        unsafe { (*arc.ptr).rc.load(Ordering::Acquire) }
+    }
+
+    pub fn get_mut(arc: &mut Arc<T>) -> Option<&mut T> {
+        if Arc::strong_count(arc) == 1 {
+            Some(unsafe { &mut arc.ptr.as_mut().unwrap().inner })
+        } else {
+            None
+        }
+    }
+}
+
+impl<T: ?Sized + Clone> Arc<T> {
+    pub fn make_mut(arc: &mut Arc<T>) -> &mut T {
+        if Arc::strong_count(arc) != 1 {
+            *arc = Arc::new((**arc).clone());
+            assert_eq!(Arc::strong_count(arc), 1);
+        }
+        Arc::get_mut(arc).unwrap()
+    }
+}
+
+impl<T: Default> Default for Arc<T> {
+    fn default() -> Arc<T> {
+        Arc::new(T::default())
+    }
+}
+
+impl<T: ?Sized> Clone for Arc<T> {
+    fn clone(&self) -> Arc<T> {
+        // safe to use Relaxed ordering below because
+        // of the required synchronization for passing
+        // any objects to another thread.
+        let last_count = unsafe { (*self.ptr).rc.fetch_add(1, Ordering::Relaxed) };
+
+        if last_count == usize::max_value() {
+            std::process::abort();
+        }
+
+        Arc { ptr: self.ptr }
+    }
+}
+
+impl<T: ?Sized> Drop for Arc<T> {
+    fn drop(&mut self) {
+        unsafe {
+            let rc = (*self.ptr).rc.fetch_sub(1, Ordering::Release) - 1;
+            if rc == 0 {
+                std::sync::atomic::fence(Ordering::Acquire);
+                drop(Box::from_raw(self.ptr));
+            }
+        }
+    }
+}
+
+impl<T: Copy> From<&[T]> for Arc<[T]> {
+    #[inline]
+    fn from(s: &[T]) -> Arc<[T]> {
+        unsafe { Arc::copy_from_slice(s) }
+    }
+}
+
+#[allow(clippy::fallible_impl_from)]
+impl<T> From<Box<[T]>> for Arc<[T]> {
+    #[inline]
+    fn from(b: Box<[T]>) -> Arc<[T]> {
+        let len = b.len();
+        unsafe {
+            let src = Box::into_raw(b);
+            let value_layout = Layout::for_value(&*src);
+            let align = std::cmp::max(value_layout.align(), mem::align_of::<AtomicUsize>());
+            let rc_width = std::cmp::max(align, mem::size_of::<AtomicUsize>());
+            let unpadded_size = rc_width.checked_add(value_layout.size()).unwrap();
+            // pad the total `Arc` allocation size to the alignment of
+            // `max(value, AtomicUsize)`
+            let size = (unpadded_size + align - 1) & !(align - 1);
+            let dst_layout = Layout::from_size_align(size, align).unwrap();
+            let dst = alloc(dst_layout);
+            assert!(!dst.is_null(), "failed to allocate Arc");
+
+            #[allow(clippy::cast_ptr_alignment)]
+            ptr::write(dst as _, AtomicUsize::new(1));
+            let data_ptr = dst.add(rc_width);
+            ptr::copy_nonoverlapping(src as *const u8, data_ptr, value_layout.size());
+
+            // free the old box memory without running Drop
+            if value_layout.size() != 0 {
+                dealloc(src as *mut u8, value_layout);
+            }
+
+            let fat_ptr: *const ArcInner<[T]> = Arc::fatten(dst, len);
+
+            Arc {
+                ptr: fat_ptr as *mut _,
+            }
+        }
+    }
+}
+
+#[test]
+fn boxed_slice_to_arc_slice() {
+    let box1: Box<[u8]> = Box::new([1, 2, 3]);
+    let arc1: Arc<[u8]> = box1.into();
+    assert_eq!(&*arc1, &*vec![1, 2, 3]);
+    let box2: Box<[u64]> = Box::new([1, 2, 3]);
+    let arc2: Arc<[u64]> = box2.into();
+    assert_eq!(&*arc2, &*vec![1, 2, 3]);
+}
+
+impl<T> From<Vec<T>> for Arc<[T]> {
+    #[inline]
+    fn from(mut v: Vec<T>) -> Arc<[T]> {
+        unsafe {
+            let arc = Arc::copy_from_slice(&v);
+
+            // Allow the Vec to free its memory, but not destroy its contents
+            v.set_len(0);
+
+            arc
+        }
+    }
+}
+
+impl<T: ?Sized> Deref for Arc<T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        unsafe { &(*self.ptr).inner }
+    }
+}
+
+impl<T: ?Sized> std::borrow::Borrow<T> for Arc<T> {
+    fn borrow(&self) -> &T {
+        &**self
+    }
+}
+
+impl<T: ?Sized> AsRef<T> for Arc<T> {
+    fn as_ref(&self) -> &T {
+        &**self
+    }
+}
diff --git a/tools/kv_typed/src/utils/ivec.rs b/tools/kv_typed/src/utils/ivec.rs
new file mode 100644
index 0000000000000000000000000000000000000000..50a8d7528ea75e7f59b839a7326f7eff7a2ba104
--- /dev/null
+++ b/tools/kv_typed/src/utils/ivec.rs
@@ -0,0 +1,314 @@
+use std::{
+    convert::TryFrom,
+    fmt,
+    hash::{Hash, Hasher},
+    ops::{Deref, DerefMut},
+    sync::Arc,
+};
+
+const CUTOFF: usize = 22;
+
+type Inner = [u8; CUTOFF];
+
+/// A buffer that may either be inline or remote and protected
+/// by an `Arc`.
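+///
+/// Buffers up to `CUTOFF` (22) bytes are stored inline, with no heap
+/// allocation; longer buffers live behind an `Arc<[u8]>`. A small sketch of
+/// the intended behaviour (internal type, shown for illustration only):
+///
+/// ```ignore
+/// let small = IVec::from(&[1u8, 2, 3][..]); // inline
+/// let large = IVec::from(&[4u8; 128][..]);  // remote, Arc-backed
+/// assert_eq!(&*small, &[1u8, 2, 3][..]);
+/// ```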
+#[derive(Clone)]
+pub struct IVec(IVecInner);
+
+impl Default for IVec {
+    fn default() -> Self {
+        Self::from(&[])
+    }
+}
+
+impl Hash for IVec {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.deref().hash(state);
+    }
+}
+
+#[derive(Clone)]
+enum IVecInner {
+    Inline(u8, Inner),
+    Remote(Arc<[u8]>),
+    Subslice {
+        base: Arc<[u8]>,
+        offset: usize,
+        len: usize,
+    },
+}
+
+const fn is_inline_candidate(length: usize) -> bool {
+    length <= CUTOFF
+}
+
+impl IVec {
+    pub fn subslice(&self, slice_offset: usize, len: usize) -> Self {
+        assert!(self.len().checked_sub(slice_offset).expect("") >= len);
+
+        let inner = match self.0 {
+            IVecInner::Remote(ref base) => IVecInner::Subslice {
+                base: base.clone(),
+                offset: slice_offset,
+                len,
+            },
+            IVecInner::Inline(_, old_inner) => {
+                // old length already checked above in assertion
+                let mut new_inner = Inner::default();
+                new_inner[..len].copy_from_slice(&old_inner[slice_offset..slice_offset + len]);
+
+                IVecInner::Inline(u8::try_from(len).expect(""), new_inner)
+            }
+            IVecInner::Subslice {
+                ref base,
+                ref offset,
+                ..
+            } => IVecInner::Subslice {
+                base: base.clone(),
+                offset: offset + slice_offset,
+                len,
+            },
+        };
+
+        IVec(inner)
+    }
+
+    fn inline(slice: &[u8]) -> Self {
+        assert!(is_inline_candidate(slice.len()));
+
+        let mut data = Inner::default();
+
+        #[allow(unsafe_code)]
+        unsafe {
+            std::ptr::copy_nonoverlapping(slice.as_ptr(), data.as_mut_ptr(), slice.len());
+        }
+
+        Self(IVecInner::Inline(
+            u8::try_from(slice.len()).expect(""),
+            data,
+        ))
+    }
+
+    fn remote(arc: Arc<[u8]>) -> Self {
+        Self(IVecInner::Remote(arc))
+    }
+
+    fn make_mut(&mut self) {
+        match self.0 {
+            IVecInner::Remote(ref mut buf) if Arc::strong_count(buf) != 1 => {
+                self.0 = IVecInner::Remote(buf.to_vec().into());
+            }
+            IVecInner::Subslice {
+                ref mut base,
+                offset,
+                len,
+            } if Arc::strong_count(base) != 1 => {
+                self.0 = IVecInner::Remote(base[offset..offset + len].to_vec().into());
+            }
+            _ => {}
+        }
+    }
+}
+
+impl From<Box<[u8]>> for IVec {
+    fn from(b: Box<[u8]>) -> Self {
+        if is_inline_candidate(b.len()) {
+            Self::inline(&b)
+        } else {
+            Self::remote(Arc::from(b))
+        }
+    }
+}
+
+impl From<&[u8]> for IVec {
+    fn from(slice: &[u8]) -> Self {
+        if is_inline_candidate(slice.len()) {
+            Self::inline(slice)
+        } else {
+            Self::remote(Arc::from(slice))
+        }
+    }
+}
+
+impl From<Arc<[u8]>> for IVec {
+    fn from(arc: Arc<[u8]>) -> Self {
+        if is_inline_candidate(arc.len()) {
+            Self::inline(&arc)
+        } else {
+            Self::remote(arc)
+        }
+    }
+}
+
+impl From<&IVec> for IVec {
+    fn from(v: &Self) -> Self {
+        v.clone()
+    }
+}
+
+impl std::borrow::Borrow<[u8]> for IVec {
+    fn borrow(&self) -> &[u8] {
+        self.as_ref()
+    }
+}
+
+impl std::borrow::Borrow<[u8]> for &IVec {
+    fn borrow(&self) -> &[u8] {
+        self.as_ref()
+    }
+}
+
+macro_rules! from_array {
+    ($($s:expr),*) => {
+        $(
+            impl From<&[u8; $s]> for IVec {
+                fn from(v: &[u8; $s]) -> Self {
+                    Self::from(&v[..])
+                }
+            }
+        )*
+    }
+}
+
+from_array!(
+    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+    26, 27, 28, 29, 30, 31, 32
+);
+
+impl Into<Arc<[u8]>> for IVec {
+    fn into(self) -> Arc<[u8]> {
+        match self.0 {
+            IVecInner::Inline(..) => Arc::from(self.as_ref()),
+            IVecInner::Remote(arc) => arc,
+            IVecInner::Subslice { .. } => self.deref().into(),
+        }
+    }
+}
+
+impl Deref for IVec {
+    type Target = [u8];
+
+    #[inline]
+    fn deref(&self) -> &[u8] {
+        self.as_ref()
+    }
+}
+
+impl AsRef<[u8]> for IVec {
+    #[inline]
+    #[allow(unsafe_code)]
+    fn as_ref(&self) -> &[u8] {
+        match &self.0 {
+            IVecInner::Inline(sz, buf) => unsafe { buf.get_unchecked(..*sz as usize) },
+            IVecInner::Remote(buf) => buf,
+            IVecInner::Subslice {
+                ref base,
+                offset,
+                len,
+            } => &base[*offset..*offset + *len],
+        }
+    }
+}
+
+impl DerefMut for IVec {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut [u8] {
+        self.as_mut()
+    }
+}
+
+impl AsMut<[u8]> for IVec {
+    #[inline]
+    #[allow(unsafe_code)]
+    fn as_mut(&mut self) -> &mut [u8] {
+        self.make_mut();
+
+        match &mut self.0 {
+            IVecInner::Inline(ref sz, ref mut buf) => unsafe {
+                std::slice::from_raw_parts_mut(buf.as_mut_ptr(), *sz as usize)
+            },
+            IVecInner::Remote(ref mut buf) => Arc::get_mut(buf).expect(""),
+            IVecInner::Subslice {
+                ref mut base,
+                offset,
+                len,
+            } => &mut Arc::get_mut(base).expect("")[*offset..*offset + *len],
+        }
+    }
+}
+
+impl Ord for IVec {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.as_ref().cmp(other.as_ref())
+    }
+}
+
+impl PartialOrd for IVec {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl<T: AsRef<[u8]>> PartialEq<T> for IVec {
+    fn eq(&self, other: &T) -> bool {
+        self.as_ref() == other.as_ref()
+    }
+}
+
+impl PartialEq<[u8]> for IVec {
+    fn eq(&self, other: &[u8]) -> bool {
+        self.as_ref() == other
+    }
+}
+
+impl Eq for IVec {}
+
+impl fmt::Debug for IVec {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.as_ref().fmt(f)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn ivec_usage() {
+        let iv2 = IVec::from(&[4; 128][..]);
+        assert_eq!(iv2, vec![4; 128]);
+    }
+
+    #[test]
+    fn boxed_slice_conversion() {
+        let boite1: Box<[u8]> = Box::new([1, 2, 3]);
+        let iv1: IVec = boite1.into();
+        assert_eq!(iv1, vec![1, 2, 3]);
+        let boite2: Box<[u8]> = Box::new([4; 128]);
+        let iv2: IVec = boite2.into();
+        assert_eq!(iv2, vec![4; 128]);
+    }
+
+    #[test]
+    #[should_panic]
+    fn subslice_usage_00() {
+        let iv1 = IVec::from(&[1, 2, 3][..]);
+        let _subslice = iv1.subslice(0, 4);
+    }
+
+    #[test]
+    #[should_panic]
+    fn subslice_usage_01() {
+        let iv1 = IVec::from(&[1, 2, 3][..]);
+        let _subslice = iv1.subslice(3, 1);
+    }
+
+    #[test]
+    fn ivec_as_mut_identity() {
+        let initial = &[1];
+        let mut iv = IVec::from(initial);
+        assert_eq!(&*initial, &*iv);
+        assert_eq!(&*initial, &mut *iv);
+        assert_eq!(&*initial, iv.as_mut());
+    }
+}
diff --git a/tools/kv_typed/src/value.rs b/tools/kv_typed/src/value.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ec71f8da518204e5a61d37d7b6a9d92a9f639967
--- /dev/null
+++ b/tools/kv_typed/src/value.rs
@@ -0,0 +1,155 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! KV Typed Value trait
+
+use crate::*;
+
+/// Trait to be implemented by the collection value
+#[cfg(not(feature = "explorer"))]
+pub trait Value: 'static + AsBytes + Debug + FromBytes + PartialEq + Send + Sync + Sized {}
+
+#[cfg(feature = "explorer")]
+pub trait Value:
+    'static + AsBytes + Debug + ExplorableValue + FromBytes + PartialEq + Send + Sync + Sized
+{
+}
+
+#[cfg(not(feature = "explorer"))]
+impl<T> Value for T where T: 'static + AsBytes + Debug + FromBytes + PartialEq + Send + Sync + Sized {}
+
+#[cfg(feature = "explorer")]
+impl<T> Value for T where
+    T: 'static + AsBytes + Debug + ExplorableValue + FromBytes + PartialEq + Send + Sync + Sized
+{
+}
+
+pub trait ValueZc: Value {
+    type Ref: Sized + zerocopy::AsBytes + zerocopy::FromBytes;
+}
+
+impl ValueZc for () {
+    type Ref = ();
+}
+
+macro_rules! impl_value_zc_for_numbers {
+    ($($T:ty),*) => {$(
+        impl ValueZc for $T {
+            type Ref = Self;
+        }
+    )*};
+}
+impl_value_zc_for_numbers!(
+    usize, u8, u16, u32, u64, u128, isize, i8, i16, i32, i64, i128, f32, f64
+);
+
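+/// Values whose serialized form ends with a contiguous slice of fixed-layout
+/// elements, so readers can borrow `&[Self::Elem]` directly from the stored
+/// bytes (see `get_ref_slice` in the tests). `prefix_len` is the number of
+/// leading bytes to skip before that slice; presumably the default of 8
+/// corresponds to a length header, while types stored as raw element bytes
+/// (`String`, `Vec<T>`, ...) return 0.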
+pub trait ValueSliceZc: Value {
+    type Elem: Sized + zerocopy::AsBytes + zerocopy::FromBytes;
+
+    fn prefix_len() -> usize {
+        8
+    }
+}
+
+impl ValueSliceZc for () {
+    type Elem = ();
+
+    fn prefix_len() -> usize {
+        0
+    }
+}
+
+impl ValueSliceZc for String {
+    type Elem = u8;
+
+    fn prefix_len() -> usize {
+        0
+    }
+}
+
+impl<T, E> ValueSliceZc for Vec<T>
+where
+    T: 'static
+        + Copy
+        + Debug
+        + Default
+        + Display
+        + FromStr<Err = E>
+        + PartialEq
+        + Send
+        + Sized
+        + Sync
+        + zerocopy::AsBytes
+        + zerocopy::FromBytes,
+    E: Error + Send + Sync + 'static,
+{
+    type Elem = T;
+
+    fn prefix_len() -> usize {
+        0
+    }
+}
+
+macro_rules! impl_value_slice_zc_for_smallvec {
+    ($($N:literal),*) => {$(
+        impl<T, E> ValueSliceZc for SmallVec<[T; $N]>
+        where
+            T: 'static
+                + Copy
+                + Debug
+                + Default
+                + Display
+                + FromStr<Err = E>
+                + PartialEq
+                + Send
+                + Sized
+                + Sync
+                + zerocopy::AsBytes
+                + zerocopy::FromBytes,
+            E: Error + Send + Sync + 'static,
+        {
+            type Elem = T;
+
+            fn prefix_len() -> usize {
+                0
+            }
+        }
+    )*};
+}
+impl_value_slice_zc_for_smallvec!(2, 4, 8, 16, 32, 64);
+
+impl<T, E> ValueSliceZc for BTreeSet<T>
+where
+    T: 'static
+        + Copy
+        + Debug
+        + Default
+        + Display
+        + FromStr<Err = E>
+        + Ord
+        + PartialEq
+        + Send
+        + Sized
+        + Sync
+        + zerocopy::AsBytes
+        + zerocopy::FromBytes,
+    E: Error + Send + Sync + 'static,
+{
+    type Elem = T;
+
+    fn prefix_len() -> usize {
+        0
+    }
+}
diff --git a/tools/kv_typed/tests/test_db_schema.rs b/tools/kv_typed/tests/test_db_schema.rs
new file mode 100644
index 0000000000000000000000000000000000000000..c312ce200d8316eb3b37f129e456a16c0d5bf296
--- /dev/null
+++ b/tools/kv_typed/tests/test_db_schema.rs
@@ -0,0 +1,218 @@
+use kv_typed::db_schema;
+use kv_typed::prelude::*;
+use std::collections::BTreeSet;
+
+db_schema!(
+    TestV1,
+    [
+        ["c1", Col1, i32, String],
+        ["c2", Col2, usize, ()],
+        ["c3", Col3, U32BE, Vec<u128>],
+        ["c4", Col4, U64BE, BTreeSet<u128>],
+    ]
+);
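+
+// What the macro generates, as inferred from its use below: a `TestV1Db<B>`
+// database type with a read accessor (`col1()`, ...) and a write accessor
+// (`col1_write()`, ...) per collection, plus per-collection event types such
+// as `Col1Event`.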
+
+#[test]
+fn test_macro_db() {
+    #[cfg(feature = "explorer")]
+    {
+        use kv_typed::backend::memory::Mem;
+        use kv_typed::explorer::DbExplorable as _;
+        assert_eq!(
+            TestV1Db::<Mem>::list_collections(),
+            vec![
+                ("col1", "i32", "String"),
+                ("col2", "usize", "()"),
+                ("col3", "U32BE", "Vec<u128>"),
+                ("col4", "U64BE", "BTreeSet<u128>")
+            ]
+        );
+    }
+}
+
+#[test]
+fn test_db_mem() -> KvResult<()> {
+    let db = TestV1Db::<kv_typed::backend::memory::Mem>::open(
+        kv_typed::backend::memory::MemConf::default(),
+    )?;
+
+    test_db(&db)
+}
+
+//#[cfg(feature = "sled_backend")]
+#[test]
+fn test_db_sled() -> KvResult<()> {
+    let db = TestV1Db::<Sled>::open(SledConf::default().temporary(true))?;
+
+    test_db(&db)
+}
+
+fn test_db<B: Backend>(db: &TestV1Db<B>) -> KvResult<()> {
+    let (sender, recv) = kv_typed::channel::unbounded();
+    db.col1().subscribe(sender)?;
+
+    let db2 = db.clone();
+
+    let handler = std::thread::spawn(move || db2.col1_write().upsert(3, "toto".to_owned()));
+    handler.join().expect("thread panic")?;
+
+    let expected_events: Events<Col1Event> = smallvec::smallvec![Col1Event::Upsert {
+        key: 3,
+        value: "toto".to_owned(),
+    }];
+    if let Ok(msg) = recv.recv() {
+        assert_eq!(msg.as_ref(), &expected_events,)
+    } else {
+        panic!("must receive an event")
+    }
+
+    assert_eq!(db.col1().get(&3)?, Some("toto".to_owned()),);
+    let d = db.col1().get_ref_slice(&3, |bytes| {
+        let str_ = unsafe { core::str::from_utf8_unchecked(bytes) };
+        assert_eq!("toto", str_);
+        assert_eq!(db.col2().get(&3)?, None,);
+        Ok(str_.to_owned())
+    })?;
+    assert_eq!(d, Some("toto".to_owned()));
+
+    assert_eq!(db.col2().get(&3)?, None,);
+    db.col2_write().upsert(3, ())?;
+    assert_eq!(db.col2().get(&3)?, Some(()),);
+
+    db.col1_write().upsert(5, "tutu".to_owned())?;
+
+    db.col1().iter(.., |mut iter| {
+        assert_eq!(iter.next_res()?, Some((3, "toto".to_owned())));
+        assert_eq!(iter.next_res()?, Some((5, "tutu".to_owned())));
+        assert_eq!(iter.next_res()?, None);
+        Ok::<(), KvError>(())
+    })?;
+
+    db.col1().iter_rev(.., |it| {
+        let mut iter = it.values();
+
+        assert_eq!(iter.next_res()?, Some("tutu".to_owned()));
+        assert_eq!(iter.next_res()?, Some("toto".to_owned()));
+        assert_eq!(iter.next_res()?, None);
+        Ok::<(), KvError>(())
+    })?;
+
+    db.col1_write().upsert(7, "titi".to_owned())?;
+
+    db.col1().iter_rev(.., |it| {
+        let mut iter = it.values().step_by(2);
+
+        assert_eq!(iter.next_res()?, Some("titi".to_owned()));
+        assert_eq!(iter.next_res()?, Some("toto".to_owned()));
+        assert_eq!(iter.next_res()?, None);
+
+        Ok::<(), KvError>(())
+    })?;
+
+    db.col3_write().upsert(U32BE(4), vec![1, 2, 3])?;
+    db.col3().get_ref_slice(&U32BE(4), |numbers| {
+        assert_eq!(numbers, &[1, 2, 3]);
+        Ok(())
+    })?;
+
+    // Test get_ref_slice
+    db.col4_write()
+        .upsert(U64BE(4), (&[3, 2, 4, 1]).iter().copied().collect())?;
+    db.col4().get_ref_slice(&U64BE(4), |numbers| {
+        assert_eq!(numbers, &[1, 2, 3, 4]);
+        Ok(())
+    })?;
+
+    // Test iter_ref_slice
+    let vec: Vec<(U32BE, Vec<u128>)> = db
+        .col3()
+        .iter_ref_slice(.., |k, v_slice| Ok((k.into(), Vec::from(v_slice))))
+        .collect::<KvResult<_>>()?;
+    assert_eq!(vec, vec![(U32BE(4), vec![1, 2, 3])]);
+
+    // Test transactional
+    // A read tx should be able to open while the write tx is not yet committed
+    let (s1, r1) = flume::bounded::<()>(0);
+    let (s2, r2) = flume::bounded::<()>(0);
+    let db_ro = db.get_ro_handler();
+    let read_task = std::thread::spawn(move || {
+        r1.recv().expect("disconnected");
+        (db_ro.col3(), db_ro.col4(), db_ro.col2()).read(|(c3, c4, _c2)| {
+            c3.get_ref_slice(&U32BE(4), |numbers| {
+                assert_eq!(numbers, &[1, 2, 3]);
+                Ok(())
+            })?;
+            c3.iter(.., |it| {
+                let iter = it.keys();
+                s2.send(()).expect("disconnected");
+                assert_eq!(iter.collect::<KvResult<Vec<_>>>()?, vec![U32BE(4)]);
+                Ok::<(), KvError>(())
+            })?;
+            c4.get_ref_slice(&U64BE(4), |numbers| {
+                assert_eq!(numbers, &[1, 2, 3, 4]);
+                Ok(())
+            })?;
+            Ok(())
+        })
+    });
+
+    let tres: KvResult<()> =
+        (db.col3_write(), db.col4_write(), db.col2_write()).write(|(mut c3, mut c4, _c2)| {
+            s1.send(()).expect("disconnected");
+            assert_eq!(
+                c3.iter(.., |it| it.keys().collect::<KvResult<Vec<_>>>())?,
+                vec![U32BE(4)]
+            );
+            assert_eq!(
+                c3.iter(.., |it| it.values().collect::<KvResult<Vec<_>>>())?,
+                vec![vec![1, 2, 3]]
+            );
+            c3.upsert(U32BE(42), vec![5, 4, 6]);
+            assert_eq!(
+                c3.iter(.., |it| it.keys().collect::<KvResult<Vec<_>>>())?,
+                vec![U32BE(4), U32BE(42)]
+            );
+            assert_eq!(
+                c3.iter_rev(.., |it| it.keys().collect::<KvResult<Vec<_>>>())?,
+                vec![U32BE(42), U32BE(4)]
+            );
+            c3.upsert(U32BE(8), vec![11, 12, 13]);
+            c3.remove(U32BE(4));
+            assert_eq!(
+                c3.iter(.., |it| it.keys().collect::<KvResult<Vec<_>>>())?,
+                vec![U32BE(8), U32BE(42)]
+            );
+            c3.iter_rev(.., |it| {
+                let iter = it.keys();
+                r2.recv().expect("disconnected");
+                assert_eq!(
+                    iter.collect::<KvResult<Vec<_>>>()?,
+                    vec![U32BE(42), U32BE(8)]
+                );
+
+                Ok::<(), KvError>(())
+            })?;
+            c4.upsert(U64BE(4), (&[7, 8, 6, 5]).iter().copied().collect());
+            Ok(())
+        });
+    tres?;
+    read_task.join().expect("read task panic")?;
+
+    // Test clear()
+    db.col4_write().clear()?;
+    assert_eq!(db.col4().count()?, 0);
+
+    // Test transactional write via the whole-db write() API
+    db.write(|mut db_tx| {
+        db_tx
+            .col4
+            .upsert(U64BE(47), (&[5, 9, 3, 2]).iter().copied().collect());
+        Ok(())
+    })?;
+    db.col4().get_ref_slice(&U64BE(47), |numbers| {
+        assert_eq!(numbers, &[2, 3, 5, 9]);
+        Ok(())
+    })?;
+
+    Ok(())
+}
diff --git a/tools/kv_typed/tests/test_mock.rs b/tools/kv_typed/tests/test_mock.rs
new file mode 100644
index 0000000000000000000000000000000000000000..8f68164a295009d6fc9c5f0e2ce2dbfa7cdad678
--- /dev/null
+++ b/tools/kv_typed/tests/test_mock.rs
@@ -0,0 +1,54 @@
+#[cfg(feature = "mock")]
+mod tests {
+
+    use kv_typed::prelude::*;
+    use mockall::predicate::*;
+    use std::fmt::Debug;
+    use std::ops::{Bound, RangeFull};
+
+    db_schema!(
+        Test,
+        [["c1", col_1, u32, String], ["c2", col_2, String, u64],]
+    );
+
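+    // Exercises the read-only API against any TestDbReadable implementation, real or mocked.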
+    fn use_readable_db<DB: TestDbReadable>(db: &DB) -> KvResult<()> {
+        let col1_reader = db.col_1();
+        assert_eq!(col1_reader.count()?, 899);
+        assert_eq!(col1_reader.get(&42)?, Some("toto".to_owned()));
+        let mut iter = col1_reader.iter(..);
+        assert_eq!(iter.next_res()?, Some((42, "toto".to_owned())));
+        assert_eq!(iter.next_res()?, None);
+        Ok(())
+    }
+
+    #[test]
+    fn test_mock_db() -> KvResult<()> {
+        let mut db = MockTestDbReadable::new();
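+        // col_1() is expected to be called exactly once and returns a fully mocked read-only column.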
+        db.expect_col_1().times(1).returning(|| {
+            let mut col_1 = MockColRo::<Col1Event>::new();
+            col_1.expect_count().times(1).returning(|| Ok(899));
+            col_1
+                .expect_get()
+                .times(1)
+                .returning(|_| Ok(Some("toto".to_owned())));
+            col_1.expect_iter::<RangeFull>().times(1).returning(|_| {
+                let mut b_iter = MockBackendIter::new();
+                #[allow(clippy::string_lit_as_bytes)]
+                let mut items = vec![
+                    None,
+                    Some(Ok((vec![0u8, 0, 0, 42], "toto".as_bytes().to_vec()))),
+                ];
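+                // expect_next() pops from the end of `items`: the Some entry is yielded first, then None ends the iteration.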
+                b_iter
+                    .expect_next()
+                    .times(2)
+                    .returning(move || items.pop().unwrap_or(None));
+                KvIter::new(b_iter, (Bound::Unbounded, Bound::Unbounded))
+            });
+            col_1
+        });
+
+        use_readable_db(&db)?;
+
+        Ok(())
+    }
+}