diff --git a/.cargo/config b/.cargo/config
index f7bc4e9dc44711432934d062e6ca3a245d9e9843..1798a5f3a5bfedd7cba46646e4a6d286ddfb50fd 100644
--- a/.cargo/config
+++ b/.cargo/config
@@ -1,6 +1,6 @@
 [alias]
 bdex = "build --release --package duniter-dbex"
-ca = "clippy --all"
+ca = "check --all"
 cn = "check --manifest-path neon/native/Cargo.toml"
 dex = "run --release --package duniter-dbex --"
 ta = "test --all"
diff --git a/Cargo.lock b/Cargo.lock
index 2d258d432ec31c2cfbf9982c425c963a19c5f0a7..e73f5eb58a6472b91fe5cca496716d1bf50813f4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -358,6 +358,12 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "bitflags"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"
+
 [[package]]
 name = "bitflags"
 version = "1.2.1"
@@ -560,7 +566,7 @@ checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
 dependencies = [
  "ansi_term",
  "atty",
- "bitflags",
+ "bitflags 1.2.1",
  "strsim 0.8.0",
  "textwrap",
  "unicode-width",
@@ -573,7 +579,7 @@ version = "0.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
 dependencies = [
- "bitflags",
+ "bitflags 1.2.1",
 ]
 
 [[package]]
@@ -582,7 +588,7 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4344512281c643ae7638bbabc3af17a11307803ec8f0fcad9fae512a8bf36467"
 dependencies = [
- "bitflags",
+ "bitflags 1.2.1",
 ]
 
 [[package]]
@@ -724,7 +730,7 @@ version = "0.17.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6f4919d60f26ae233e14233cc39746c8c8bb8cd7b05840ace83604917b51b6c7"
 dependencies = [
- "bitflags",
+ "bitflags 1.2.1",
  "crossterm_winapi",
  "lazy_static",
  "libc",
@@ -1042,13 +1048,12 @@ dependencies = [
 ]
 
 [[package]]
-name = "duniter-dbs-writer"
+name = "duniter-dbs-write-ops"
 version = "0.1.0"
 dependencies = [
  "chrono",
  "dubp",
  "duniter-dbs",
- "flume",
  "log",
  "resiter",
  "serde_json",
@@ -1063,7 +1068,8 @@ dependencies = [
  "dubp",
  "duniter-dbs",
  "duniter-dbs-read-ops",
- "duniter-dbs-writer",
+ "duniter-mempools",
+ "fast-threadpool",
  "flume",
  "futures",
  "http",
@@ -1090,6 +1096,18 @@ dependencies = [
  "structopt",
 ]
 
+[[package]]
+name = "duniter-mempools"
+version = "0.1.0"
+dependencies = [
+ "dubp",
+ "duniter-dbs",
+ "duniter-dbs-read-ops",
+ "duniter-dbs-write-ops",
+ "log",
+ "thiserror",
+]
+
 [[package]]
 name = "duniter-server"
 version = "1.8.1"
@@ -1097,10 +1115,13 @@ dependencies = [
  "dubp",
  "duniter-dbs",
  "duniter-dbs-read-ops",
- "duniter-dbs-writer",
+ "duniter-dbs-write-ops",
  "duniter-gva",
+ "duniter-mempools",
+ "fast-threadpool",
  "flume",
  "log",
+ "rand 0.7.3",
  "resiter",
  "unwrap",
 ]
@@ -1190,6 +1211,17 @@ version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"
 
+[[package]]
+name = "fast-threadpool"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e3f69f217417e35f8f2c52256c8852dcd72e1933261195d437d5bfc14245793"
+dependencies = [
+ "flume",
+ "num_cpus",
+ "oneshot",
+]
+
 [[package]]
 name = "fastrand"
 version = "1.4.0"
@@ -1295,7 +1327,7 @@ version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
 dependencies = [
- "bitflags",
+ "bitflags 1.2.1",
  "fuchsia-zircon-sys",
 ]
 
@@ -1424,6 +1456,25 @@ dependencies = [
  "byteorder",
 ]
 
+[[package]]
+name = "gcc"
+version = "0.3.55"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
+
+[[package]]
+name = "generator"
+version = "0.6.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8cdc09201b2e8ca1b19290cf7e65de2246b8e91fb6874279722189c4de7b94dc"
+dependencies = [
+ "cc",
+ "libc",
+ "log",
+ "rustc_version",
+ "winapi 0.3.9",
+]
+
 [[package]]
 name = "generic-array"
 version = "0.12.3"
@@ -1537,7 +1588,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ed18eb2459bf1a09ad2d6b1547840c3e5e62882fa09b9a6a20b1de8e3228848f"
 dependencies = [
  "base64",
- "bitflags",
+ "bitflags 1.2.1",
  "bytes",
  "headers-core",
  "http",
@@ -1750,19 +1801,24 @@ name = "kv_typed"
 version = "0.1.0"
 dependencies = [
  "async-std",
+ "cfg-if 0.1.10",
  "criterion",
  "flume",
  "kv_typed_code_gen",
  "leveldb_minimal",
+ "lmdb-zero",
  "maybe-async",
  "mockall",
  "parking_lot 0.11.0",
+ "paste",
  "rayon",
  "regex",
  "serde_json",
  "sled",
  "smallvec",
+ "tempdir",
  "thiserror",
+ "uninit",
  "unwrap",
  "zerocopy",
 ]
@@ -1810,12 +1866,34 @@ version = "0.2.79"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743"
 
+[[package]]
+name = "liblmdb-sys"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "feed38a3a580f60bf61aaa067b0ff4123395966839adeaf67258a9e50c4d2e49"
+dependencies = [
+ "gcc",
+ "libc",
+]
+
 [[package]]
 name = "linked-hash-map"
 version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a"
 
+[[package]]
+name = "lmdb-zero"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13416eee745b087c22934f35f1f24da22da41ba2a5ce197143d168ce055cc58d"
+dependencies = [
+ "bitflags 0.9.1",
+ "libc",
+ "liblmdb-sys",
+ "supercow",
+]
+
 [[package]]
 name = "lock_api"
 version = "0.3.4"
@@ -1849,6 +1927,17 @@ version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3d0925aed5b12ed59857f438d25a910cf051dbcd4107907be1e7abf6c44ec903"
 
+[[package]]
+name = "loom"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0e8460f2f2121162705187214720353c517b97bdfb3494c0b1e33d83ebe4bed"
+dependencies = [
+ "cfg-if 0.1.10",
+ "generator",
+ "scoped-tls",
+]
+
 [[package]]
 name = "lru"
 version = "0.6.0"
@@ -2134,7 +2223,7 @@ version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "50e4785f2c3b7589a0d0c1dd60285e1188adac4006e8abd6dd578e1567027363"
 dependencies = [
- "bitflags",
+ "bitflags 1.2.1",
  "cc",
  "cfg-if 0.1.10",
  "libc",
@@ -2147,7 +2236,7 @@ version = "0.18.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "83450fe6a6142ddd95fb064b746083fc4ef1705fe81f64a64e1d4b39f54a1055"
 dependencies = [
- "bitflags",
+ "bitflags 1.2.1",
  "cc",
  "cfg-if 0.1.10",
  "libc",
@@ -2254,6 +2343,15 @@ version = "1.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad"
 
+[[package]]
+name = "oneshot"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "39d7085e4e51b36df4afa83db60d20ad2adf8e8587a193f93c9143bf7b375dec"
+dependencies = [
+ "loom",
+]
+
 [[package]]
 name = "oorandom"
 version = "11.1.2"
@@ -2337,6 +2435,12 @@ dependencies = [
  "regex",
 ]
 
+[[package]]
+name = "paste"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba7ae1a2180ed02ddfdb5ab70c70d596a26dd642e097bb6fe78b1bde8588ed97"
+
 [[package]]
 name = "percent-encoding"
 version = "2.1.0"
@@ -3159,6 +3263,12 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "supercow"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "171758edb47aa306a78dfa4ab9aeb5167405bd4e3dc2b64e88f6a84bbe98bd63"
+
 [[package]]
 name = "syn"
 version = "1.0.44"
@@ -3487,6 +3597,12 @@ version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
 
+[[package]]
+name = "uninit"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ce382f462302087c8effe69a6c9e84ae8ce6a9cc541d921d0bb5d1fd789cdbf"
+
 [[package]]
 name = "untrusted"
 version = "0.7.1"
diff --git a/Cargo.toml b/Cargo.toml
index bf823126a9024a3a766981f3f53782f5484597ad..25d2fb8ae50fd5361013d66e72056278e00acb57 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -34,7 +34,9 @@ members = [
     "rust-libs/dubp-wot",
     "rust-libs/duniter-dbs",
     "rust-libs/duniter-dbs-read-ops",
+    "rust-libs/duniter-dbs-write-ops",
     "rust-libs/duniter-gva",
+    "rust-libs/duniter-mempools",
     "rust-libs/duniter-server",
     "rust-libs/tools/kv_typed",
     "rust-libs/tools/kv_typed_code_gen"
diff --git a/app/lib/blockchain/DuniterBlockchain.ts b/app/lib/blockchain/DuniterBlockchain.ts
index bd9594ebac716187971853e2408e7ae29469b8cd..a6105de9c502e1c589359d0b7559f8678b1ee16e 100644
--- a/app/lib/blockchain/DuniterBlockchain.ts
+++ b/app/lib/blockchain/DuniterBlockchain.ts
@@ -496,7 +496,7 @@ export class DuniterBlockchain {
     dal: FileDAL,
     block?: DBBlock
   ) {
-    if (block && conf.gva) {
+    if (block && block.toBlockDTO) {
       dal.rustServer.revertBlock(block.toBlockDTO());
     }
 
diff --git a/app/lib/dal/fileDAL.ts b/app/lib/dal/fileDAL.ts
index b63c1e88d2b68fa7434d5ef3593c5711e2cf321f..57122d88c5161fa7b7981ff3a380a53c8e84ce27 100644
--- a/app/lib/dal/fileDAL.ts
+++ b/app/lib/dal/fileDAL.ts
@@ -283,7 +283,9 @@ export class FileDAL implements ServerDAO {
     return this.getAbsoluteBlockInForkWindow(number, hash);
   }
 
-  getAbsoluteValidBlockInForkWindowByBlockstamp(blockstamp: string) {
+  async getAbsoluteValidBlockInForkWindowByBlockstamp(
+    blockstamp: string
+  ): Promise<DBBlock | null> {
     if (!blockstamp) throw "Blockstamp is required to find the block";
     const sp = blockstamp.split("-");
     const number = parseInt(sp[0]);
@@ -1239,9 +1241,7 @@ export class FileDAL implements ServerDAO {
 
   async saveBlock(block: DBBlock, conf: ConfDTO) {
     block.wrong = false;
-    if (conf.gva) {
-      this.rustServer.applyBlock(block.toBlockDTO());
-    }
+    this.rustServer.applyBlock(block.toBlockDTO());
     await this.saveBlockInFile(block);
   }
 
diff --git a/app/modules/crawler/lib/sync/v2/GlobalIndexStream.ts b/app/modules/crawler/lib/sync/v2/GlobalIndexStream.ts
index 6519a98b93d3c30ae90c491bc7148d38b399cef7..9824b490efbe0c0cd8031cfd930a53d31b48c324 100644
--- a/app/modules/crawler/lib/sync/v2/GlobalIndexStream.ts
+++ b/app/modules/crawler/lib/sync/v2/GlobalIndexStream.ts
@@ -438,9 +438,7 @@ export class GlobalIndexStream extends Duplex {
       })
     );
 
-    if (this.conf.gva) {
-      this.dal.rustServer.applyChunkOfBlocks(blocks);
-    }
+    this.dal.rustServer.applyChunkOfBlocks(blocks);
 
     logger.debug("Total tx count: %s", txCount);
   }
diff --git a/neon/lib/event_emitter.ts b/neon/lib/event_emitter.ts
deleted file mode 100644
index 83eb4911ae53cdf57e53b9324b0b9a55608b6f4f..0000000000000000000000000000000000000000
--- a/neon/lib/event_emitter.ts
+++ /dev/null
@@ -1,58 +0,0 @@
-import { EventEmitter } from 'events';
-import { RustEventEmitter as RustEventEmitterInner } from '../native';
-
-export class RustEventEmitter extends EventEmitter {
-
-    isShutdown: boolean;
-
-    constructor() {
-      super();
-  
-      // Create an instance of the Neon class
-      const channel = new RustEventEmitterInner();
-
-      // Marks the emitter as shutdown to stop iteration of the `poll` loop
-      this.isShutdown = false;
-  
-      // The `loop` method is called continuously to receive data from the Rust
-      // work thread.
-      const loop = async () => {
-        // Stop the receiving loop and shutdown the work thead. However, since
-        // the `poll` method uses a blocking `recv`, this code will not execute
-        // until either the next event is sent on the channel or a receive
-        // timeout has occurred.
-        if (this.isShutdown) {
-          return;
-        }
-
-        await new Promise((res, rej) => setTimeout(() => res(), 100));
-  
-        // Poll for data
-        channel.poll((err, e) => {
-          if (err) this.emit('error', err);
-          else if (e) {
-            //console.log("TMP: js receive event from rust");
-            const { event, ...data } = e;
-  
-            // Emit the event
-            this.emit(event, data);
-          }
-  
-          // Schedule the next iteration of the loop. This is performed with
-          // a `setImmediate` to yield to the event loop, to let JS code run
-          // and avoid a stack overflow.
-          setImmediate(loop);
-        });
-      };
-  
-      // Start the polling loop on next iteration of the JS event loop to prevent zalgo.
-      setImmediate(loop);
-    }
-  
-    // Mark the channel for shutdown
-    shutdown() {
-      this.isShutdown = true;
-      return this;
-    }
-  }
-
diff --git a/neon/lib/index.ts b/neon/lib/index.ts
index 086ca0e879fd9e045fe6dcab8375ee37a77b9c30..80f731b88f07c99ce24c8da3cb3c4faaec4d039c 100644
--- a/neon/lib/index.ts
+++ b/neon/lib/index.ts
@@ -15,5 +15,4 @@ export {
     Wot
 } from "../native";
 export { KeyPairBuilder } from "./crypto";
-export { RustEventEmitter } from "./event_emitter";
 export { WotBuilder } from "./wot";
diff --git a/neon/native/artifacts.json b/neon/native/artifacts.json
index 3f346e098060aa63ab49162d2e1b63335e8b2b02..837b6fb098085aa2c03e9f16f2d3012e99586b67 100644
--- a/neon/native/artifacts.json
+++ b/neon/native/artifacts.json
@@ -1 +1 @@
-{"active":"release","targets":{"debug":{"rustc":"","env":{"npm_config_target":null,"npm_config_arch":null,"npm_config_target_arch":null,"npm_config_disturl":null,"npm_config_runtime":null,"npm_config_build_from_source":null,"npm_config_devdir":null}},"release":{"rustc":"","env":{"npm_config_target":null,"npm_config_arch":null,"npm_config_target_arch":null,"npm_config_disturl":null,"npm_config_runtime":null,"npm_config_build_from_source":null,"npm_config_devdir":null}}}}
\ No newline at end of file
+{"active":"debug","targets":{"debug":{"rustc":"","env":{"npm_config_target":null,"npm_config_arch":null,"npm_config_target_arch":null,"npm_config_disturl":null,"npm_config_runtime":null,"npm_config_build_from_source":null,"npm_config_devdir":null}},"release":{"rustc":"","env":{"npm_config_target":null,"npm_config_arch":null,"npm_config_target_arch":null,"npm_config_disturl":null,"npm_config_runtime":null,"npm_config_build_from_source":null,"npm_config_devdir":null}}}}
\ No newline at end of file
diff --git a/neon/native/event_emitter.d.ts b/neon/native/event_emitter.d.ts
deleted file mode 100644
index 2a11516142fb322d4a63a43b21408dbc5e841b62..0000000000000000000000000000000000000000
--- a/neon/native/event_emitter.d.ts
+++ /dev/null
@@ -1,14 +0,0 @@
-/* tslint:disable */
-
-import { TransactionDTOV10 } from './transaction';
-
-export class Event {
-    event: string;
-    data: TransactionDTOV10[];
-}
-
-export class RustEventEmitter {
-    constructor()
-
-    poll(cb: (err: any, event: Event) => void): void
-}
diff --git a/neon/native/index.d.ts b/neon/native/index.d.ts
index cd5b6f24113226d572a01901dcfb7f91544bc6f3..2abc4e1a44d88cd9f7f0a5b27ae325911d96816f 100644
--- a/neon/native/index.d.ts
+++ b/neon/native/index.d.ts
@@ -1,7 +1,6 @@
 /* tslint:disable */
 
 import * as _crypto from './crypto';
-import * as _event_emitter from './event_emitter';
 import * as _logger from './logger';
 import * as _server from './server';
 import * as _transactions from './transaction';
@@ -13,8 +12,6 @@ export import seedToSecretKey = _crypto.seedToSecretKey;
 export import sha256 = _crypto.sha256;
 export import verify = _crypto.verify;
 
-export import RustEventEmitter = _event_emitter.RustEventEmitter;
-
 export import RustLogger = _logger.RustLogger;
 
 export import RustDbTx = _server.RustDbTx;
diff --git a/neon/native/src/event_emitter.rs b/neon/native/src/event_emitter.rs
deleted file mode 100644
index cde725f32864a18fe4dfe42f784c27a42a080dcc..0000000000000000000000000000000000000000
--- a/neon/native/src/event_emitter.rs
+++ /dev/null
@@ -1,140 +0,0 @@
-//  Copyright (C) 2020 Éloïs SANCHEZ.
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-use dubp::documents::prelude::ToStringObject;
-use dubp::documents::transaction::TransactionDocumentV10Stringified;
-use once_cell::sync::OnceCell;
-
-use super::*;
-use std::ops::Deref;
-
-#[derive(Clone)]
-pub struct EventEmitter {
-    txs_mps_subscriber: Option<duniter_server::TxsMpSubscriber>,
-}
-
-static EVENT_EMITTER: OnceCell<EventEmitter> = OnceCell::new();
-
-pub(crate) fn init_event_emitter(txs_mps_subscriber: Option<duniter_server::TxsMpSubscriber>) {
-    let _ = EVENT_EMITTER.set(EventEmitter { txs_mps_subscriber });
-}
-
-declare_types! {
-    pub class JsEventEmitter for EventEmitter {
-        init(_cx) {
-            Ok(EVENT_EMITTER.get().expect("EVENT_EMITTER is not initialized").clone())
-        }
-        // This method should be called by JS to receive data. It accepts a
-        // `function (err, data)` style asynchronous callback. It may be called
-        // in a loop, but care should be taken to only call it once at a time.
-        method poll(mut cx) {
-            // The callback to be executed when data is available
-            let cb = cx.argument::<JsFunction>(0)?;
-
-            // Create an asynchronously `EventEmitterTask` to receive data
-            let this = cx.this();
-            let task_opt = {
-                let guard = cx.lock();
-                let event_emitter = this.borrow(&guard);
-                if let Some(ref txs_mps_subscriber) = event_emitter.txs_mps_subscriber {
-                    Some(EventEmitterTask(txs_mps_subscriber.clone()))
-                } else {
-                    None
-                }
-            };
-
-            // Schedule the task on the `libuv` thread pool
-            if let Some(task) = task_opt {
-                task.schedule(cb);
-            }
-
-            // The `poll` method does not return any data.
-            Ok(JsUndefined::new().upcast())
-        }
-    }
-}
-
-pub enum Event {
-    ReceiveValidTxs {
-        txs: duniter_server::smallvec::SmallVec<[TransactionDocumentV10Stringified; 4]>,
-    },
-}
-
-// Reading from a channel `Receiver` is a blocking operation. This struct
-// wraps the data required to perform a read asynchronously from a libuv
-// thread.
-pub struct EventEmitterTask(pub(crate) duniter_server::TxsMpSubscriber);
-
-// Implementation of a neon `Task` for `EventEmitterTask`. This task reads
-// from the events channel and calls a JS callback with the data.
-impl Task for EventEmitterTask {
-    type Output = Option<Event>;
-    type Error = String;
-    type JsEvent = JsValue;
-
-    fn perform(&self) -> Result<Self::Output, Self::Error> {
-        match self.0.try_recv() {
-            Ok(events) => {
-                let mut txs = duniter_server::smallvec::SmallVec::new();
-                for event in events.deref() {
-                    if let duniter_server::TxEvent::Upsert {
-                        value: pending_tx, ..
-                    } = event
-                    {
-                        txs.push(pending_tx.0.to_string_object());
-                    }
-                }
-                if txs.is_empty() {
-                    Ok(None)
-                } else {
-                    Ok(Some(Event::ReceiveValidTxs { txs }))
-                }
-            }
-            Err(flume::TryRecvError::Empty) => Ok(None),
-            Err(flume::TryRecvError::Disconnected) => Err("Failed to receive event".to_string()),
-        }
-    }
-
-    fn complete(
-        self,
-        mut cx: TaskContext<'_>,
-        result: Result<Self::Output, Self::Error>,
-    ) -> JsResult<Self::JsEvent> {
-        // Receive the event or return early with the error
-        let event = result.or_else(|err| cx.throw_error(&err))?;
-
-        // Timeout occured, return early with `undefined
-        let event = match event {
-            Some(event) => event,
-            None => return Ok(JsUndefined::new().upcast()),
-        };
-
-        // Create an empty object `{}`
-        let o = cx.empty_object();
-
-        // Creates an object of the shape `{ "event": string, ...data }`
-        match event {
-            Event::ReceiveValidTxs { txs } => {
-                let event_name = cx.string("txs");
-                let event_content = neon_serde::to_value(&mut cx, &txs)?;
-
-                o.set(&mut cx, "event", event_name)?;
-                o.set(&mut cx, "data", event_content)?;
-            }
-        }
-
-        Ok(o.upcast())
-    }
-}
diff --git a/neon/native/src/lib.rs b/neon/native/src/lib.rs
index e71400b7c63960053f17b26821f90e6330cf3d1b..b1c59b070d75f8a1ce83b5628ca7e30f93409107 100644
--- a/neon/native/src/lib.rs
+++ b/neon/native/src/lib.rs
@@ -25,7 +25,6 @@
 )]
 
 mod crypto;
-mod event_emitter;
 mod logger;
 mod server;
 mod transaction;
@@ -52,7 +51,6 @@ register_module!(mut cx, {
     cx.export_function("sha256", crate::crypto::sha256)?;
     cx.export_function("verify", crate::crypto::verify)?;
     cx.export_class::<crate::crypto::JsKeyPair>("Ed25519Signator")?;
-    cx.export_class::<crate::event_emitter::JsEventEmitter>("RustEventEmitter")?;
     cx.export_class::<crate::logger::JsLogger>("RustLogger")?;
     cx.export_class::<crate::server::JsServer>("RustServer")?;
     cx.export_function(
diff --git a/neon/native/src/server.rs b/neon/native/src/server.rs
index 9fbc32ba672eccb7f037e26e2f498338d880671b..6e4e860574b11227b1486da56836d15cbc4f37b5 100644
--- a/neon/native/src/server.rs
+++ b/neon/native/src/server.rs
@@ -79,12 +79,10 @@ declare_types! {
                 None
             };
             if let Some(home_path) = home_path_opt {
-                let (server, txs_mps_subscriber) = DuniterServer::start(conf, Some(home_path.as_path()), std::env!("CARGO_PKG_VERSION"));
-                crate::event_emitter::init_event_emitter(Some(txs_mps_subscriber));
+                let server = DuniterServer::start(conf, Some(home_path.as_path()), std::env!("CARGO_PKG_VERSION"));
                 Ok(RustServer { server })
             } else {
-                let (server, _) = DuniterServer::start(conf, None, std::env!("CARGO_PKG_VERSION"));
-                crate::event_emitter::init_event_emitter(None);
+                let server = DuniterServer::start(conf, None, std::env!("CARGO_PKG_VERSION"));
                 Ok(RustServer { server })
             }
         }
@@ -234,8 +232,7 @@ declare_types! {
             let res = {
                 let guard = cx.lock();
                 let server = this.borrow(&guard);
-                let recv = server.server.add_pending_tx(tx, true);
-                recv.recv().expect("rust server disconnected")
+                server.server.add_pending_tx_force(tx)
             }.map(|_| cx.undefined().upcast());
             into_neon_res(&mut cx, res)
         }
@@ -274,12 +271,12 @@ declare_types! {
         }
         method getMempoolTxsFreeRooms(mut cx) {
             let this = cx.this();
-            let free_rooms = {
+            let res = {
                 let guard = cx.lock();
                 let server = this.borrow(&guard);
                 server.server.get_mempool_txs_free_rooms()
-            };
-            Ok(cx.number(free_rooms as f64).upcast())
+            }.map(|free_rooms| cx.number(free_rooms as f64).upcast());
+            into_neon_res(&mut cx, res)
         }
         method removeAllPendingTxs(mut cx) {
             let this = cx.this();
diff --git a/rust-bins/duniter-dbex/Cargo.toml b/rust-bins/duniter-dbex/Cargo.toml
index 8681257ac85d4b4958e5ff1d088a0419c5e00251..a62fcacba5049a4bb506d480a41d58c3f5cd52d5 100644
--- a/rust-bins/duniter-dbex/Cargo.toml
+++ b/rust-bins/duniter-dbex/Cargo.toml
@@ -22,7 +22,7 @@ arrayvec = "0.5.1"
 comfy-table = "1.0.0"
 dirs = "3.0.1"
 dubp = { version = "0.29.0" }
-duniter-dbs = { path = "../../rust-libs/duniter-dbs", default-features = false, features = ["explorer", "leveldb_backend", "sled_backend", "sync"] }
+duniter-dbs = { path = "../../rust-libs/duniter-dbs", default-features = false, features = ["explorer", "leveldb_backend", "sled_backend"] }
 rayon = "1.3.1"
 serde_json = "1.0.53"
 structopt = "0.3.16"
diff --git a/rust-libs/duniter-dbs-read-ops/src/lib.rs b/rust-libs/duniter-dbs-read-ops/src/lib.rs
index 18951c6a0d7b2d3a0e020ad4739eecc702560054..c5bce22f260fc26aea9e8e989edda907a9ebbbf9 100644
--- a/rust-libs/duniter-dbs-read-ops/src/lib.rs
+++ b/rust-libs/duniter-dbs-read-ops/src/lib.rs
@@ -33,7 +33,6 @@ use duniter_dbs::{
     //BlockNumberArrayV2, BlockNumberKeyV2, SourceAmountValV2, UtxosOfScriptV1
     //GvaV1Db,
     GvaV1DbReadable,
-    GvaV1DbRo,
     //GvaV1DbWritable,
     HashKeyV2,
     //PendingTxDbV2,
@@ -41,7 +40,6 @@ use duniter_dbs::{
     TxDbV2,
     //TxsMpV2Db,
     TxsMpV2DbReadable,
-    TxsMpV2DbRo,
     //TxsMpV2DbWritable,
     //WalletConditionsV2,
 };
diff --git a/rust-libs/duniter-dbs-read-ops/src/txs_history.rs b/rust-libs/duniter-dbs-read-ops/src/txs_history.rs
index 72132f9f1a2e704c96b976709d3db5af8ac541f1..96f75be805fcd8bb7a9f8ccdfeeb3d04d95d15fb 100644
--- a/rust-libs/duniter-dbs-read-ops/src/txs_history.rs
+++ b/rust-libs/duniter-dbs-read-ops/src/txs_history.rs
@@ -22,13 +22,13 @@ pub struct TxsHistory {
     pub pending: Vec<TransactionDocumentV10>,
 }
 
-pub fn tx_exist<B: Backend>(gva_db_ro: &GvaV1DbRo<B>, hash: Hash) -> KvResult<bool> {
+pub fn tx_exist<GvaDb: GvaV1DbReadable>(gva_db_ro: &GvaDb, hash: Hash) -> KvResult<bool> {
     Ok(gva_db_ro.txs().get(&HashKeyV2(hash))?.is_some())
 }
 
-pub fn get_transactions_history<B: Backend>(
-    gva_db_ro: &GvaV1DbRo<B>,
-    txs_mp_db_ro: &TxsMpV2DbRo<B>,
+pub fn get_transactions_history<GvaDb: GvaV1DbReadable, TxsMpDb: TxsMpV2DbReadable>(
+    gva_db_ro: &GvaDb,
+    txs_mp_db_ro: &TxsMpDb,
     pubkey: PublicKey,
 ) -> KvResult<TxsHistory> {
     let sent = gva_db_ro
diff --git a/rust-libs/duniter-dbs-read-ops/src/utxos.rs b/rust-libs/duniter-dbs-read-ops/src/utxos.rs
index 4528daf9431f2c4ebfc3846775162149522215cd..5e833fb821437a7844498a1bf6c33f940c604ff0 100644
--- a/rust-libs/duniter-dbs-read-ops/src/utxos.rs
+++ b/rust-libs/duniter-dbs-read-ops/src/utxos.rs
@@ -18,8 +18,8 @@ use duniter_dbs::WalletConditionsV2;
 
 use crate::*;
 
-pub fn get_script_utxos<B: Backend>(
-    gva_db_ro: &GvaV1DbRo<B>,
+pub fn get_script_utxos<GvaDb: GvaV1DbReadable>(
+    gva_db_ro: &GvaDb,
     script: &WalletScriptV10,
 ) -> KvResult<Vec<(i64, UtxoIdV10, SourceAmount)>> {
     if let Some(utxos_of_script) = gva_db_ro
diff --git a/rust-libs/duniter-dbs-writer/Cargo.toml b/rust-libs/duniter-dbs-write-ops/Cargo.toml
similarity index 84%
rename from rust-libs/duniter-dbs-writer/Cargo.toml
rename to rust-libs/duniter-dbs-write-ops/Cargo.toml
index 83048821a6c2088b5f60975bc0b2bb2b8874a238..c6013efb36f0bf1fae8967c391b8537e3740b80c 100644
--- a/rust-libs/duniter-dbs-writer/Cargo.toml
+++ b/rust-libs/duniter-dbs-write-ops/Cargo.toml
@@ -1,8 +1,8 @@
 [package]
-name = "duniter-dbs-writer"
+name = "duniter-dbs-write-ops"
 version = "0.1.0"
 authors = ["elois <elois@duniter.org>"]
-description = "Duniter DBs writer"
+description = "Duniter DBs write operations"
 repository = "https://git.duniter.org/nodes/typescript/duniter"
 keywords = ["dubp", "duniter", "blockchain", "database"]
 license = "AGPL-3.0"
@@ -15,7 +15,6 @@ path = "src/lib.rs"
 chrono = "0.4.19"
 dubp = { version = "0.29.0" }
 duniter-dbs = { path = "../duniter-dbs" }
-flume = "0.9.1"
 log = "0.4.11"
 resiter = "0.4.0"
 
diff --git a/rust-libs/duniter-dbs-writer/src/identities.rs b/rust-libs/duniter-dbs-write-ops/src/identities.rs
similarity index 100%
rename from rust-libs/duniter-dbs-writer/src/identities.rs
rename to rust-libs/duniter-dbs-write-ops/src/identities.rs
diff --git a/rust-libs/duniter-dbs-write-ops/src/lib.rs b/rust-libs/duniter-dbs-write-ops/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..043295c09236a51adcd101e3295961f93d102194
--- /dev/null
+++ b/rust-libs/duniter-dbs-write-ops/src/lib.rs
@@ -0,0 +1,411 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+#![deny(
+    clippy::unwrap_used,
+    missing_copy_implementations,
+    trivial_casts,
+    trivial_numeric_casts,
+    unstable_features,
+    unused_import_braces
+)]
+
+mod identities;
+mod tx;
+mod utxos;
+
+use std::borrow::Cow;
+
+use crate::utxos::UtxoV10;
+use dubp::block::DubpBlockV10Stringified;
+use dubp::common::crypto::hashs::Hash;
+use dubp::common::prelude::*;
+use dubp::documents::{
+    prelude::*, smallvec::SmallVec, transaction::TransactionDocumentTrait,
+    transaction::TransactionDocumentV10,
+};
+use dubp::documents_parser::prelude::*;
+use dubp::wallet::prelude::*;
+use duniter_dbs::gva_v1::{TxEvent, TxsByIssuerEvent, TxsByRecipientEvent};
+use duniter_dbs::{
+    kv_typed::prelude::*, GvaV1Db, GvaV1DbReadable, GvaV1DbWritable, HashKeyV2, PendingTxDbV2,
+    PubKeyKeyV2, TxDbV2, TxsMpV2Db, TxsMpV2DbReadable, TxsMpV2DbWritable, WalletConditionsV2,
+};
+use resiter::flatten::Flatten;
+use resiter::map::Map;
+
+pub fn add_pending_tx<
+    B: Backend,
+    F: FnOnce(
+        &TransactionDocumentV10,
+        &TxColRw<B::Col, duniter_dbs::txs_mp_v2::TxEvent>,
+    ) -> KvResult<()>,
+>(
+    control: F,
+    txs_mp_db: &TxsMpV2Db<B>,
+    tx: Cow<TransactionDocumentV10>,
+) -> KvResult<()> {
+    let tx_hash = tx.get_hash();
+    let received_time = chrono::offset::Utc::now().timestamp();
+    (
+        txs_mp_db.txs_by_recv_time_write(),
+        txs_mp_db.txs_by_issuer_write(),
+        txs_mp_db.txs_by_recipient_write(),
+        txs_mp_db.txs_write(),
+    )
+        .write(
+            |(mut txs_by_recv_time, mut txs_by_issuer, mut txs_by_recipient, mut txs)| {
+                control(&tx, &txs)?;
+                // Insert on col `txs_by_recv_time`
+                let mut hashs = txs_by_recv_time.get(&received_time)?.unwrap_or_default();
+                hashs.0.insert(tx_hash);
+                txs_by_recv_time.upsert(received_time, hashs);
+                // Insert on col `txs_by_issuer`
+                for pubkey in tx.issuers() {
+                    let mut hashs = txs_by_issuer.get(&PubKeyKeyV2(pubkey))?.unwrap_or_default();
+                    hashs.0.insert(tx.get_hash());
+                    txs_by_issuer.upsert(PubKeyKeyV2(pubkey), hashs);
+                }
+                // Insert on col `txs_by_recipient`
+                for pubkey in tx.recipients_keys() {
+                    let mut hashs = txs_by_recipient
+                        .get(&PubKeyKeyV2(pubkey))?
+                        .unwrap_or_default();
+                    hashs.0.insert(tx.get_hash());
+                    txs_by_recipient.upsert(PubKeyKeyV2(pubkey), hashs);
+                }
+                // Insert tx itself
+                txs.upsert(HashKeyV2(tx_hash), PendingTxDbV2(tx.into_owned()));
+                Ok(())
+            },
+        )
+}
+
+pub fn remove_all_pending_txs<B: Backend>(txs_mp_db: &TxsMpV2Db<B>) -> KvResult<()> {
+    txs_mp_db.txs_by_recv_time_write().clear()?;
+    txs_mp_db.txs_by_issuer_write().clear()?;
+    txs_mp_db.txs_by_recipient_write().clear()?;
+    txs_mp_db.txs_write().clear()?;
+
+    Ok(())
+}
+
+pub fn remove_pending_tx_by_hash<B: Backend>(txs_mp_db: &TxsMpV2Db<B>, hash: Hash) -> KvResult<()> {
+    remove_one_pending_tx(&txs_mp_db, hash)?;
+    Ok(())
+}
+
+pub fn revert_block<B: Backend>(
+    gva_db: &GvaV1Db<B>,
+    txs_mp_db: &TxsMpV2Db<B>,
+    block: DubpBlockV10Stringified,
+    gva: bool,
+) -> KvResult<()> {
+    for tx in &block.transactions {
+        let tx_hash = if let Some(ref tx_hash) = tx.hash {
+            Hash::from_hex(&tx_hash)
+                .map_err(|e| KvError::DeserError(format!("Transaction with invalid hash: {}", e)))?
+        } else {
+            return Err(KvError::DeserError(
+                "Tried to revert a block that contains a transaction without a hash!".to_owned(),
+            ));
+        };
+        if gva {
+            let tx = tx::revert_tx(gva_db, &tx_hash)?.ok_or_else(|| {
+                KvError::DbCorrupted(format!("GVA: tx '{}' dont exist on txs history.", tx_hash,))
+            })?;
+            add_pending_tx(|_, _| Ok(()), txs_mp_db, Cow::Owned(tx))?;
+        } else {
+            add_pending_tx(
+                |_, _| Ok(()),
+                txs_mp_db,
+                Cow::Owned(
+                    TransactionDocumentV10::from_string_object(&tx).map_err(|e| {
+                        KvError::DeserError(format!("Block with invalid tx: {}", e))
+                    })?,
+                ),
+            )?;
+        }
+    }
+
+    identities::revert_identities(gva_db, &block)?;
+
+    Ok(())
+}
+
+pub fn apply_block<B: Backend>(
+    gva_db: &GvaV1Db<B>,
+    txs_mp_db: &TxsMpV2Db<B>,
+    block: DubpBlockV10Stringified,
+    gva: bool,
+) -> KvResult<()> {
+    let block_hash = if let Some(ref block_hash_str) = block.hash {
+        Hash::from_hex(&block_hash_str)
+            .map_err(|_| KvError::DeserError(format!("Hash '{}' is invalid", block_hash_str)))?
+    } else {
+        return Err(KvError::DeserError(format!(
+            "Block #{} has no hash",
+            block.number
+        )));
+    };
+    let blockstamp = Blockstamp {
+        number: BlockNumber(block.number as u32),
+        hash: BlockHash(block_hash),
+    };
+    let txs = block
+        .transactions
+        .iter()
+        .map(|tx_str| TransactionDocumentV10::from_string_object(tx_str))
+        .collect::<Result<Vec<TransactionDocumentV10>, TextParseError>>()
+        .map_err(|e| KvError::DeserError(format!("Invalid transaction in block: {}", e)))?;
+    write_block_txs(
+        &txs_mp_db,
+        &gva_db,
+        blockstamp,
+        block.median_time as i64,
+        gva,
+        txs,
+    )?;
+
+    if gva {
+        identities::update_identities(&gva_db, &block)?;
+    }
+
+    Ok(())
+}
+
+#[inline(always)]
+pub fn apply_chunk_of_blocks<B: Backend>(
+    gva_db: &GvaV1Db<B>,
+    txs_mp_db: &TxsMpV2Db<B>,
+    blocks: Vec<DubpBlockV10Stringified>,
+    gva: bool,
+) -> KvResult<()> {
+    for block in blocks {
+        if block.number > 300_000 {
+            log::info!("apply_block(#{})", block.number);
+        }
+        apply_block(gva_db, txs_mp_db, block, gva)?;
+    }
+    Ok(())
+}
+
+fn write_block_txs<B: Backend>(
+    txs_mp_db: &TxsMpV2Db<B>,
+    gva_db: &GvaV1Db<B>,
+    current_blockstamp: Blockstamp,
+    current_time: i64,
+    gva: bool,
+    txs: Vec<TransactionDocumentV10>,
+) -> KvResult<()> {
+    for tx in txs {
+        let tx_hash = tx.get_hash();
+        // Remove tx from mempool
+        remove_one_pending_tx(&txs_mp_db, tx_hash)?;
+        // Write tx and update sources
+        if gva {
+            tx::write_gva_tx(current_blockstamp, current_time, &gva_db, tx_hash, tx)?;
+        }
+    }
+    Ok(())
+}
+
+pub fn trim_expired_non_written_txs<B: Backend>(
+    txs_mp_db: &TxsMpV2Db<B>,
+    limit_time: i64,
+) -> KvResult<()> {
+    // Get hashes of txs to remove and the "times" to remove
+    let mut times = Vec::new();
+    let hashs = txs_mp_db.txs_by_recv_time().iter(..limit_time, |it| {
+        it.map_ok(|(k, v)| {
+            times.push(k);
+            v.0
+        })
+        .flatten_ok()
+        .collect::<KvResult<SmallVec<[Hash; 4]>>>()
+    })?;
+    // For each tx to remove
+    for (hash, time) in hashs.into_iter().zip(times.into_iter()) {
+        remove_one_pending_tx(&txs_mp_db, hash)?;
+        // Remove tx hashes in col `txs_by_recv_time`
+        txs_mp_db.txs_by_recv_time_write().remove(time)?;
+    }
+
+    Ok(())
+}
+
+fn remove_one_pending_tx<B: Backend>(txs_mp_db: &TxsMpV2Db<B>, tx_hash: Hash) -> KvResult<bool> {
+    if let Some(tx) = txs_mp_db.txs().get(&HashKeyV2(tx_hash))? {
+        // Remove tx hash in col `txs_by_issuer`
+        for pubkey in tx.0.issuers() {
+            let mut hashs_ = txs_mp_db
+                .txs_by_issuer()
+                .get(&PubKeyKeyV2(pubkey))?
+                .unwrap_or_default();
+            hashs_.0.remove(&tx_hash);
+            txs_mp_db
+                .txs_by_issuer_write()
+                .upsert(PubKeyKeyV2(pubkey), hashs_)?
+        }
+        // Remove tx hash in col `txs_by_recipient`
+        for pubkey in tx.0.recipients_keys() {
+            let mut hashs_ = txs_mp_db
+                .txs_by_recipient()
+                .get(&PubKeyKeyV2(pubkey))?
+                .unwrap_or_default();
+            hashs_.0.remove(&tx_hash);
+            txs_mp_db
+                .txs_by_recipient_write()
+                .upsert(PubKeyKeyV2(pubkey), hashs_)?
+        }
+        // Remove tx itself
+        txs_mp_db.txs_write().remove(HashKeyV2(tx_hash))?;
+        Ok(true)
+    } else {
+        Ok(false)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use dubp::documents::transaction::TransactionDocumentV10Stringified;
+
+    #[test]
+    #[ignore]
+    fn tmp_apply_block_real() -> KvResult<()> {
+        let gva_db = GvaV1Db::<Sled>::open(
+            SledConf::default()
+                .path("/home/elois/.config/duniter/s2/data/gva_v1_sled")
+                .flush_every_ms(None),
+        )?;
+        let txs_mp_db = TxsMpV2Db::<Sled>::open(
+            SledConf::default()
+                .path("/home/elois/.config/duniter/s2/data/txs_mp_v2_sled")
+                .flush_every_ms(None),
+        )?;
+
+        let txs: Vec<TransactionDocumentV10Stringified> = serde_json::from_str(r#"[
+            {
+              "version": 10,
+              "currency": "g1",
+              "comment": ". je me sens plus legere mm si....reste le bon toit a trouver dans un temps record ! Merci pour cet eclairage fort",
+              "locktime": 0,
+              "signatures": [
+                "8t5vo+k5OvkyAd+L+J8g6MLpp/AP0qOQFcJvf+OPMEZaVnHH38YtCigo64unU9aCsb9zZc6UEc78ZrkQ/E2TCg=="
+              ],
+              "outputs": [
+                "5000:0:SIG(5VYg9YHvLQuoky7EPyyk3cEfBUtB1GuAeJ6SiJ6c9wWe)",
+                "55:0:SIG(Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x)"
+              ],
+              "inputs": [
+                "1011:0:D:Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x:296658",
+                "1011:0:D:Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x:296936",
+                "1011:0:D:Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x:297211",
+                "1011:0:D:Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x:297489",
+                "1011:0:D:Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x:297786"
+              ],
+              "unlocks": [
+                "0:SIG(0)",
+                "1:SIG(0)",
+                "2:SIG(0)",
+                "3:SIG(0)",
+                "4:SIG(0)"
+              ],
+              "blockstamp": "304284-000003F738B9A5FC8F5D04B4B9746FD899B3A49367099BB2796E7EF976DCDABB",
+              "blockstampTime": 0,
+              "issuers": [
+                "Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x"
+              ],
+              "block_number": 0,
+              "time": 0
+            },
+            {
+              "version": 10,
+              "currency": "g1",
+              "comment": "Pour les places de cine et l expedition ..Merci",
+              "locktime": 0,
+              "signatures": [
+                "VhzwAwsCr30XnetveS74QD2kJMYCQ89VZvyUBJM9DP/kd5KBqkF1c1HcKpJdHrfu2oq3JbSEIhEf/aLgnEdSCw=="
+              ],
+              "outputs": [
+                "6000:0:SIG(jUPLL2BgY2QpheWEY3R13edV2Y4tvQMCXjJVM8PGDvyd)",
+                "10347:0:SIG(2CWxxkttvkGSUVZdaUZHiksNisDC3wJx32Y2NVAyeHez)"
+              ],
+              "inputs": [
+                "347:0:T:4EA4D01422469ABA380F48A48254EB3F15606C12FE4CFF7E7D6EEB1FD9752DDB:1",
+                "16000:0:T:9A4DA56EF5F9B50D612D806BAE0886EB3033B4F166D2E96498DE16B83F39B59D:0"
+              ],
+              "unlocks": [
+                "0:SIG(0)",
+                "1:SIG(0)"
+              ],
+              "blockstamp": "304284-000003F738B9A5FC8F5D04B4B9746FD899B3A49367099BB2796E7EF976DCDABB",
+              "blockstampTime": 0,
+              "issuers": [
+                "2CWxxkttvkGSUVZdaUZHiksNisDC3wJx32Y2NVAyeHez"
+              ],
+              "block_number": 0,
+              "time": 0
+            },
+            {
+              "version": 10,
+              "currency": "g1",
+              "comment": "POur le sac a tarte merci",
+              "locktime": 0,
+              "signatures": [
+                "721K4f+F9PgksoVDZgQTURJIO/DZUhQfAzXfBvYrFkgqHNNeBbcgGecFX63rPYjFvau+qg1Hmi0coL9z7r7EAQ=="
+              ],
+              "outputs": [
+                "15000:0:SIG(KxyNK1k55PEA8eBjX1K4dLJr35gC2dwMwNFPHwvZFH4)",
+                "17668:0:SIG(4VQvVLT1R6upLuRk85A5eWTowqJwvkSMGQQZ9Hc4bqLg)"
+              ],
+              "inputs": [
+                "1011:0:D:4VQvVLT1R6upLuRk85A5eWTowqJwvkSMGQQZ9Hc4bqLg:303924",
+                "1011:0:D:4VQvVLT1R6upLuRk85A5eWTowqJwvkSMGQQZ9Hc4bqLg:304212",
+                "10458:0:T:55113E18AB61603AD0FC24CD11ACBC96F9583FD0A5877055F17315E9613BBF7D:1",
+                "20188:0:T:937A0454C1A63B383FBB6D219B9312B0A36DFE19DA08076BD113F9D5D4FC903D:1"
+              ],
+              "unlocks": [
+                "0:SIG(0)",
+                "1:SIG(0)",
+                "2:SIG(0)",
+                "3:SIG(0)"
+              ],
+              "blockstamp": "304284-000003F738B9A5FC8F5D04B4B9746FD899B3A49367099BB2796E7EF976DCDABB",
+              "blockstampTime": 0,
+              "issuers": [
+                "4VQvVLT1R6upLuRk85A5eWTowqJwvkSMGQQZ9Hc4bqLg"
+              ],
+              "block_number": 0,
+              "time": 0
+            }
+          ]"#).expect("wrong tx");
+
+        let block = DubpBlockV10Stringified {
+            number: 304286,
+            hash: Some(
+                "000001339AECF3CAB78B2B61776FB3819B800AB43923F4F8BD0F5AE47B7DEAB9".to_owned(),
+            ),
+            median_time: 1583862823,
+            transactions: txs,
+            ..Default::default()
+        };
+
+        apply_block(&gva_db, &txs_mp_db, block, true)?;
+
+        Ok(())
+    }
+}
diff --git a/rust-libs/duniter-dbs-write-ops/src/tx.rs b/rust-libs/duniter-dbs-write-ops/src/tx.rs
new file mode 100644
index 0000000000000000000000000000000000000000..6dc3d21caa073c69ee15868654a0db2a0898bcb1
--- /dev/null
+++ b/rust-libs/duniter-dbs-write-ops/src/tx.rs
@@ -0,0 +1,219 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::*;
+
+pub(crate) fn write_gva_tx<B: Backend>(
+    current_blockstamp: Blockstamp,
+    current_time: i64,
+    gva_db: &GvaV1Db<B>,
+    tx_hash: Hash,
+    tx: TransactionDocumentV10,
+) -> KvResult<()> {
+    (
+        gva_db.scripts_by_pubkey_write(),
+        gva_db.txs_by_issuer_write(),
+        gva_db.txs_by_recipient_write(),
+        gva_db.txs_write(),
+        gva_db.utxos_by_script_write(),
+    )
+        .write(
+            |(
+                mut scripts_by_pubkey,
+                mut txs_by_issuer,
+                mut txs_by_recipient,
+                mut txs,
+                mut utxos_by_script,
+            )| {
+                // Insert on col `txs_by_issuer`
+                for pubkey in tx.issuers() {
+                    let mut hashs = txs_by_issuer.get(&PubKeyKeyV2(pubkey))?.unwrap_or_default();
+                    hashs.0.insert(tx_hash);
+                    txs_by_issuer.upsert(PubKeyKeyV2(pubkey), hashs);
+                }
+                // Insert on col `txs_by_recipient`
+                for pubkey in tx.recipients_keys() {
+                    let mut hashs = txs_by_recipient
+                        .get(&PubKeyKeyV2(pubkey))?
+                        .unwrap_or_default();
+                    hashs.0.insert(tx_hash);
+                    txs_by_recipient.upsert(PubKeyKeyV2(pubkey), hashs);
+                }
+
+                // Remove consumed UTXOs
+                for input in tx.get_inputs() {
+                    // TODO ESZ remove UD sources
+                    if let SourceIdV10::Utxo(utxo_id) = input.id {
+                        let db_tx_origin = gva_db
+                            .txs()
+                            .get(&HashKeyV2::from_ref(&utxo_id.tx_hash))?
+                            .ok_or_else(|| {
+                                KvError::DbCorrupted(format!(
+                                    "Origin tx of utxo {} not found",
+                                    utxo_id
+                                ))
+                            })?;
+                        let utxo_script = db_tx_origin.tx.get_outputs()[utxo_id.output_index]
+                            .conditions
+                            .script
+                            .clone();
+                        utxos::remove_utxo_v10::<B>(
+                            &mut scripts_by_pubkey,
+                            &mut utxos_by_script,
+                            &utxo_script,
+                            db_tx_origin.written_time,
+                        )?;
+                    }
+                }
+
+                // Insert created UTXOs
+                for (output_index, output) in tx.get_outputs().iter().enumerate() {
+                    utxos::write_utxo_v10::<B>(
+                        &mut scripts_by_pubkey,
+                        &mut utxos_by_script,
+                        UtxoV10 {
+                            id: UtxoIdV10 {
+                                tx_hash,
+                                output_index,
+                            },
+                            amount: output.amount,
+                            script: output.conditions.script.clone(),
+                            written_time: current_time,
+                        },
+                    )?;
+                }
+
+                // Insert tx itself
+                txs.upsert(
+                    HashKeyV2(tx_hash),
+                    TxDbV2 {
+                        tx,
+                        written_block: current_blockstamp,
+                        written_time: current_time,
+                    },
+                );
+
+                Ok(())
+            },
+        )?;
+
+    Ok(())
+}
+
+pub(crate) fn revert_tx<B: Backend>(
+    gva_db: &GvaV1Db<B>,
+    tx_hash: &Hash,
+) -> KvResult<Option<TransactionDocumentV10>> {
+    if let Some(tx_db) = gva_db.txs().get(&HashKeyV2::from_ref(tx_hash))? {
+        let written_time = tx_db.written_time;
+
+        (
+            gva_db.scripts_by_pubkey_write(),
+            gva_db.txs_by_issuer_write(),
+            gva_db.txs_by_recipient_write(),
+            gva_db.txs_write(),
+            gva_db.utxos_by_script_write(),
+        )
+            .write(
+                |(
+                    mut scripts_by_pubkey,
+                    mut txs_by_issuer,
+                    mut txs_by_recipient,
+                    mut txs,
+                    mut utxos_by_script,
+                )| {
+                    // Remove UTXOs created by this tx
+                    use dubp::documents::transaction::TransactionDocumentTrait as _;
+                    for output in tx_db.tx.get_outputs() {
+                        let script = &output.conditions.script;
+                        utxos::remove_utxo_v10::<B>(
+                            &mut scripts_by_pubkey,
+                            &mut utxos_by_script,
+                            script,
+                            written_time,
+                        )?;
+                    }
+                    // Recreate UTXOs consumed by this tx
+                    for input in tx_db.tx.get_inputs() {
+                        // TODO ESZ recreate UD sources
+                        if let SourceIdV10::Utxo(utxo_id) = input.id {
+                            let db_tx_origin = gva_db
+                                .txs()
+                                .get(&HashKeyV2::from_ref(&utxo_id.tx_hash))?
+                                .ok_or_else(|| {
+                                    KvError::DbCorrupted(format!(
+                                        "Origin tx of utxo {} not found",
+                                        utxo_id
+                                    ))
+                                })?;
+                            let utxo_script = db_tx_origin.tx.get_outputs()[utxo_id.output_index]
+                                .conditions
+                                .script
+                                .clone();
+                            utxos::write_utxo_v10::<B>(
+                                &mut scripts_by_pubkey,
+                                &mut utxos_by_script,
+                                UtxoV10 {
+                                    id: utxo_id,
+                                    amount: input.amount,
+                                    script: utxo_script,
+                                    written_time: db_tx_origin.written_time,
+                                },
+                            )?;
+                        }
+                    }
+                    // Remove tx
+                    remove_tx::<B>(
+                        &mut txs_by_issuer,
+                        &mut txs_by_recipient,
+                        &mut txs,
+                        tx_hash,
+                        &tx_db,
+                    )?;
+                    Ok(())
+                },
+            )?;
+
+        Ok(Some(tx_db.tx))
+    } else {
+        Ok(None)
+    }
+}
+
+fn remove_tx<B: Backend>(
+    txs_by_issuer: &mut TxColRw<B::Col, TxsByIssuerEvent>,
+    txs_by_recipient: &mut TxColRw<B::Col, TxsByRecipientEvent>,
+    txs: &mut TxColRw<B::Col, TxEvent>,
+    tx_hash: &Hash,
+    tx_db: &TxDbV2,
+) -> KvResult<()> {
+    // Remove tx hash in col `txs_by_issuer`
+    for pubkey in tx_db.tx.issuers() {
+        let mut hashs_ = txs_by_issuer.get(&PubKeyKeyV2(pubkey))?.unwrap_or_default();
+        hashs_.0.remove(&tx_hash);
+        txs_by_issuer.upsert(PubKeyKeyV2(pubkey), hashs_)
+    }
+    // Remove tx hash in col `txs_by_recipient`
+    for pubkey in tx_db.tx.recipients_keys() {
+        let mut hashs_ = txs_by_recipient
+            .get(&PubKeyKeyV2(pubkey))?
+            .unwrap_or_default();
+        hashs_.0.remove(&tx_hash);
+        txs_by_recipient.upsert(PubKeyKeyV2(pubkey), hashs_)
+    }
+    // Remove tx itself
+    txs.remove(HashKeyV2(*tx_hash));
+    Ok(())
+}
diff --git a/rust-libs/duniter-dbs-writer/src/utxos.rs b/rust-libs/duniter-dbs-write-ops/src/utxos.rs
similarity index 54%
rename from rust-libs/duniter-dbs-writer/src/utxos.rs
rename to rust-libs/duniter-dbs-write-ops/src/utxos.rs
index de83d458f3a5f44cc18a02f8bd6cd5c2fe14560f..869d6d9cf16dd6591150d0ac4c502f1ee8936f2a 100644
--- a/rust-libs/duniter-dbs-writer/src/utxos.rs
+++ b/rust-libs/duniter-dbs-write-ops/src/utxos.rs
@@ -22,19 +22,19 @@ pub struct UtxoV10 {
     pub written_time: i64,
 }
 
-pub(crate) fn write_utxo_v10<B: Backend>(gva_db: &GvaV1Db<B>, utxo: UtxoV10) -> KvResult<()> {
+pub(crate) fn write_utxo_v10<B: Backend>(
+    scripts_by_pubkey: &mut TxColRw<B::Col, duniter_dbs::gva_v1::ScriptsByPubkeyEvent>,
+    utxos_by_script: &mut TxColRw<B::Col, duniter_dbs::gva_v1::UtxosByScriptEvent>,
+    utxo: UtxoV10,
+) -> KvResult<()> {
     for pubkey in utxo.script.pubkeys() {
-        let mut pubkey_scripts = gva_db
-            .scripts_by_pubkey()
+        let mut pubkey_scripts = scripts_by_pubkey
             .get(&PubKeyKeyV2(pubkey))?
             .unwrap_or_default();
         pubkey_scripts.0.insert(utxo.script.clone());
-        gva_db
-            .scripts_by_pubkey_write()
-            .upsert(PubKeyKeyV2(pubkey), pubkey_scripts)?;
+        scripts_by_pubkey.upsert(PubKeyKeyV2(pubkey), pubkey_scripts);
     }
-    let mut utxo_of_script = gva_db
-        .utxos_by_script()
+    let mut utxo_of_script = utxos_by_script
         .get(WalletConditionsV2::from_ref(&utxo.script))?
         .unwrap_or_default();
     utxo_of_script
@@ -42,43 +42,38 @@ pub(crate) fn write_utxo_v10<B: Backend>(gva_db: &GvaV1Db<B>, utxo: UtxoV10) ->
         .entry(utxo.written_time)
         .or_default()
         .push((utxo.id, utxo.amount));
-    gva_db
-        .utxos_by_script_write()
-        .upsert(WalletConditionsV2(utxo.script), utxo_of_script)?;
+    utxos_by_script.upsert(WalletConditionsV2(utxo.script), utxo_of_script);
+
     Ok(())
 }
 
 pub(crate) fn remove_utxo_v10<B: Backend>(
-    gva_db: &GvaV1Db<B>,
+    scripts_by_pubkey: &mut TxColRw<B::Col, duniter_dbs::gva_v1::ScriptsByPubkeyEvent>,
+    utxos_by_script: &mut TxColRw<B::Col, duniter_dbs::gva_v1::UtxosByScriptEvent>,
     utxo_script: &WalletScriptV10,
     written_time: i64,
 ) -> KvResult<()> {
-    if let Some(mut utxos_of_script) = gva_db
-        .utxos_by_script()
-        .get(&WalletConditionsV2::from_ref(utxo_script))?
+    if let Some(mut utxos_of_script) =
+        utxos_by_script.get(&WalletConditionsV2::from_ref(utxo_script))?
     {
         utxos_of_script.0.remove(&written_time);
         if utxos_of_script.0.is_empty() {
             let pubkeys = utxo_script.pubkeys();
             for pubkey in pubkeys {
-                let mut pubkey_scripts = gva_db
-                    .scripts_by_pubkey()
-                    .get(&PubKeyKeyV2(pubkey))?
-                    .ok_or_else(|| {
-                        KvError::DbCorrupted(format!(
-                            "GVA: key {} dont exist on col `scripts_by_pubkey`.",
-                            pubkey,
-                        ))
-                    })?;
+                let mut pubkey_scripts =
+                    scripts_by_pubkey
+                        .get(&PubKeyKeyV2(pubkey))?
+                        .ok_or_else(|| {
+                            KvError::DbCorrupted(format!(
+                                "GVA: key {} dont exist on col `scripts_by_pubkey`.",
+                                pubkey,
+                            ))
+                        })?;
                 pubkey_scripts.0.remove(utxo_script);
-                gva_db
-                    .scripts_by_pubkey_write()
-                    .upsert(PubKeyKeyV2(pubkey), pubkey_scripts)?;
+                scripts_by_pubkey.upsert(PubKeyKeyV2(pubkey), pubkey_scripts);
             }
         }
-        gva_db
-            .utxos_by_script_write()
-            .upsert(WalletConditionsV2(utxo_script.clone()), utxos_of_script)?;
+        utxos_by_script.upsert(WalletConditionsV2(utxo_script.clone()), utxos_of_script);
     }
     Ok(())
 }
diff --git a/rust-libs/duniter-dbs-writer/src/gva_writer.rs b/rust-libs/duniter-dbs-writer/src/gva_writer.rs
deleted file mode 100644
index d2a93f8e331049c9a27575fdff9768c7c7a87091..0000000000000000000000000000000000000000
--- a/rust-libs/duniter-dbs-writer/src/gva_writer.rs
+++ /dev/null
@@ -1,45 +0,0 @@
-//  Copyright (C) 2020 Éloïs SANCHEZ.
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-use crate::*;
-
-pub struct GvaWriter {
-    txs_mempool_size: usize,
-    writer_sender: flume::Sender<DbsWriterMsg>,
-}
-
-impl GvaWriter {
-    pub fn mock() -> Self {
-        Self {
-            txs_mempool_size: 0,
-            writer_sender: flume::bounded(0).0,
-        }
-    }
-    pub fn new(txs_mempool_size: usize, writer_sender: flume::Sender<DbsWriterMsg>) -> Self {
-        Self {
-            txs_mempool_size,
-            writer_sender,
-        }
-    }
-    pub fn add_pending_tx(&self, tx: TransactionDocumentV10) -> Receiver<KvResult<bool>> {
-        let (sender, receiver) = flume::bounded(0);
-        let _ = self.writer_sender.send(DbsWriterMsg::AddPendingTx {
-            tx,
-            max_tx_mp_size_opt: Some(self.txs_mempool_size),
-            sender,
-        });
-        receiver
-    }
-}
diff --git a/rust-libs/duniter-dbs-writer/src/lib.rs b/rust-libs/duniter-dbs-writer/src/lib.rs
deleted file mode 100644
index 07d7a03dbf8bd7855f789960d54d845083316ce1..0000000000000000000000000000000000000000
--- a/rust-libs/duniter-dbs-writer/src/lib.rs
+++ /dev/null
@@ -1,518 +0,0 @@
-//  Copyright (C) 2020 Éloïs SANCHEZ.
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-#![deny(
-    clippy::unwrap_used,
-    missing_copy_implementations,
-    trivial_casts,
-    trivial_numeric_casts,
-    unstable_features,
-    unused_import_braces
-)]
-
-mod gva_writer;
-mod identities;
-mod tx;
-mod utxos;
-
-pub use gva_writer::GvaWriter;
-
-use crate::utxos::UtxoV10;
-use dubp::block::DubpBlockV10Stringified;
-use dubp::common::crypto::bases::BaseConversionError;
-use dubp::common::crypto::hashs::Hash;
-use dubp::common::crypto::keys::ed25519::PublicKey;
-use dubp::common::prelude::*;
-use dubp::documents::{
-    prelude::*, smallvec::SmallVec, transaction::TransactionDocumentTrait,
-    transaction::TransactionDocumentV10,
-};
-use dubp::documents_parser::prelude::*;
-use dubp::wallet::prelude::*;
-use duniter_dbs::{
-    kv_typed::prelude::*,
-    //BlockNumberArrayV2, BlockNumberKeyV2, SourceAmountValV2, UtxosOfScriptV1
-    GvaV1Db,
-    GvaV1DbReadable,
-    GvaV1DbWritable,
-    HashKeyV2,
-    PendingTxDbV2,
-    PubKeyKeyV2,
-    TxDbV2,
-    TxsMpV2Db,
-    TxsMpV2DbReadable,
-    TxsMpV2DbWritable,
-    WalletConditionsV2,
-};
-use flume::{Receiver, Sender};
-use resiter::flatten::Flatten;
-use resiter::map::Map;
-
-pub struct DbsWriter<B: Backend> {
-    gva_db: GvaV1Db<B>,
-    new_pending_txs: Vec<TransactionDocumentV10>,
-    recv: flume::Receiver<DbsWriterMsg>,
-    server_pubkey: PublicKey,
-    txs_mp_db: TxsMpV2Db<B>,
-    txs_mp_len: usize,
-}
-
-pub enum DbsWriterMsg {
-    AddPendingTx {
-        tx: TransactionDocumentV10,
-        max_tx_mp_size_opt: Option<usize>,
-        sender: Sender<KvResult<bool>>,
-    },
-    ApplyBlock {
-        block: DubpBlockV10Stringified,
-        sender: Sender<KvResult<()>>,
-    },
-    ApplyChunkOfBlocks {
-        blocks: Vec<DubpBlockV10Stringified>,
-        sender: Sender<KvResult<()>>,
-    },
-    GetNewPendingTxs(Sender<Vec<TransactionDocumentV10>>),
-    GetTxsMpLen(Sender<usize>),
-    RemoveAllPendingTxs(Sender<KvResult<()>>),
-    RemovePendingTxByHash(Hash, Sender<KvResult<()>>),
-    RevertBlock {
-        block: DubpBlockV10Stringified,
-        sender: Sender<KvResult<()>>,
-    },
-    TrimExpiredNonWrittenTxs {
-        limit_time: i64,
-        sender: Sender<KvResult<()>>,
-    },
-    Stop,
-}
-
-impl<B: Backend> DbsWriter<B> {
-    pub fn new(
-        gva_db: GvaV1Db<B>,
-        server_pubkey: PublicKey,
-        txs_mp_db: TxsMpV2Db<B>,
-    ) -> (Self, flume::Sender<DbsWriterMsg>) {
-        let (sender, recv) = flume::bounded(64);
-        let txs_mp_len = txs_mp_db
-            .txs()
-            .count()
-            .expect("fail to init rust server: fail to get txs_mp_len");
-        (
-            DbsWriter {
-                gva_db,
-                new_pending_txs: Vec::new(),
-                recv,
-                server_pubkey,
-                txs_mp_db,
-                txs_mp_len,
-            },
-            sender,
-        )
-    }
-    pub fn main_loop(mut self) {
-        while let Ok(msg) = self.recv.recv() {
-            match msg {
-                DbsWriterMsg::AddPendingTx {
-                    tx,
-                    max_tx_mp_size_opt,
-                    sender,
-                } => {
-                    let accepted = if let Some(max_tx_mp_size) = max_tx_mp_size_opt {
-                        self.txs_mp_len < max_tx_mp_size
-                            || tx.issuers().contains(&self.server_pubkey)
-                    } else {
-                        true
-                    };
-                    if accepted {
-                        let res = self.add_pending_tx(tx.clone());
-                        if res.is_ok() {
-                            self.new_pending_txs.push(tx);
-                            self.txs_mp_len += 1;
-                        }
-                        let _ = sender.send(res.map(|()| true));
-                    } else {
-                        let _ = sender.send(Ok(false));
-                    }
-                }
-                DbsWriterMsg::ApplyBlock { block, sender } => {
-                    let _ = sender.send(self.apply_block(block));
-                }
-                DbsWriterMsg::ApplyChunkOfBlocks { blocks, sender } => {
-                    let _ = sender.send(self.apply_chunk_of_blocks(blocks));
-                }
-                DbsWriterMsg::GetNewPendingTxs(sender) => {
-                    let _ = sender.send(self.new_pending_txs.drain(..).collect());
-                }
-                DbsWriterMsg::GetTxsMpLen(sender) => {
-                    let _ = sender.send(self.txs_mp_len);
-                }
-                DbsWriterMsg::RemoveAllPendingTxs(sender) => {
-                    let res = self.remove_all_pending_txs();
-                    if res.is_ok() {
-                        self.txs_mp_len = 0;
-                    }
-                    let _ = sender.send(res);
-                }
-                DbsWriterMsg::RemovePendingTxByHash(hash, sender) => {
-                    let _ = sender.send(self.remove_pending_tx_by_hash(hash));
-                }
-                DbsWriterMsg::RevertBlock { block, sender } => {
-                    let _ = sender.send(self.revert_block(block));
-                }
-                DbsWriterMsg::TrimExpiredNonWrittenTxs { limit_time, sender } => {
-                    let _ = sender.send(self.trim_expired_non_written_txs(limit_time));
-                }
-                DbsWriterMsg::Stop => break,
-            }
-        }
-        self.gva_db.save().expect("fail to save GVA DB");
-        self.txs_mp_db.save().expect("fail to save TxsMp DB");
-    }
-
-    fn add_pending_tx(&self, tx: TransactionDocumentV10) -> KvResult<()> {
-        let tx_hash = tx.get_hash();
-        let received_time = chrono::offset::Utc::now().timestamp();
-        // Insert on col `txs_by_recv_time`
-        let mut hashs = self
-            .txs_mp_db
-            .txs_by_recv_time()
-            .get(&received_time)?
-            .unwrap_or_default();
-        hashs.0.insert(tx_hash);
-        self.txs_mp_db
-            .txs_by_recv_time_write()
-            .upsert(received_time, hashs)?;
-
-        // Insert on col `txs_by_issuer`
-        for pubkey in tx.issuers() {
-            let mut hashs = self
-                .txs_mp_db
-                .txs_by_issuer()
-                .get(&PubKeyKeyV2(pubkey))?
-                .unwrap_or_default();
-            hashs.0.insert(tx.get_hash());
-            self.txs_mp_db
-                .txs_by_issuer_write()
-                .upsert(PubKeyKeyV2(pubkey), hashs)?;
-        }
-        // Insert on col `txs_by_recipient`
-        for pubkey in tx.recipients_keys() {
-            let mut hashs = self
-                .txs_mp_db
-                .txs_by_recipient()
-                .get(&PubKeyKeyV2(pubkey))?
-                .unwrap_or_default();
-            hashs.0.insert(tx.get_hash());
-            self.txs_mp_db
-                .txs_by_recipient_write()
-                .upsert(PubKeyKeyV2(pubkey), hashs)?;
-        }
-        // Insert tx itself
-        self.txs_mp_db
-            .txs_write()
-            .upsert(HashKeyV2(tx_hash), PendingTxDbV2(tx))
-    }
-
-    fn remove_all_pending_txs(&self) -> KvResult<()> {
-        self.txs_mp_db.txs_write().clear()?;
-        self.txs_mp_db.txs_by_issuer_write().clear()?;
-        self.txs_mp_db.txs_by_recipient_write().clear()?;
-        self.txs_mp_db.txs_by_recv_time_write().clear()?;
-        Ok(())
-    }
-
-    fn remove_pending_tx_by_hash(&mut self, hash: Hash) -> KvResult<()> {
-        if remove_one_pending_tx(&self.txs_mp_db, hash)? {
-            self.txs_mp_len -= 1;
-        }
-        Ok(())
-    }
-
-    fn revert_block(&self, block: DubpBlockV10Stringified) -> KvResult<()> {
-        let block_txs_hashs = block
-            .transactions
-            .iter()
-            .map(|tx| {
-                if let Some(ref tx_hash) = tx.hash {
-                    Ok(Hash::from_hex(&tx_hash))
-                } else {
-                    Err(KvError::DeserError(
-                        "Try to revert a block that contains a transaction without hash !"
-                            .to_owned(),
-                    ))
-                }
-            })
-            .collect::<KvResult<Result<Vec<Hash>, BaseConversionError>>>()?
-            .map_err(|e| KvError::DeserError(format!("Transaction with invalid hash: {}", e)))?;
-        for tx_hash in block_txs_hashs {
-            let tx = tx::revert_tx(&self.gva_db, &tx_hash)?.ok_or_else(|| {
-                KvError::DbCorrupted(format!("GVA: tx '{}' dont exist on txs history.", tx_hash,))
-            })?;
-            self.add_pending_tx(tx)?;
-        }
-
-        identities::revert_identities(&self.gva_db, &block)?;
-
-        Ok(())
-    }
-
-    fn apply_block(&self, block: DubpBlockV10Stringified) -> KvResult<()> {
-        let block_hash = if let Some(ref block_hash_str) = block.hash {
-            Hash::from_hex(&block_hash_str)
-                .map_err(|_| KvError::DeserError(format!("Hash '{}' is invalid", block_hash_str)))?
-        } else {
-            return Err(KvError::DeserError(format!(
-                "Block #{} is without hash",
-                block.number
-            )));
-        };
-        let blockstamp = Blockstamp {
-            number: BlockNumber(block.number as u32),
-            hash: BlockHash(block_hash),
-        };
-        let txs = block
-            .transactions
-            .iter()
-            .map(|tx_str| TransactionDocumentV10::from_string_object(tx_str))
-            .collect::<Result<Vec<TransactionDocumentV10>, TextParseError>>()
-            .map_err(|e| KvError::DeserError(format!("Invalid transaction in block: {}", e)))?;
-        self.write_block_txs(blockstamp, block.median_time as i64, txs)?;
-
-        identities::update_identities(&self.gva_db, &block)?;
-
-        Ok(())
-    }
-
-    #[inline(always)]
-    fn apply_chunk_of_blocks(&self, blocks: Vec<DubpBlockV10Stringified>) -> KvResult<()> {
-        for block in blocks {
-            if block.number > 300_000 {
-                log::info!("apply_block(#{})", block.number);
-            }
-            self.apply_block(block)?;
-        }
-        Ok(())
-    }
-
-    fn write_block_txs(
-        &self,
-        current_blockstamp: Blockstamp,
-        current_time: i64,
-        txs: Vec<TransactionDocumentV10>,
-    ) -> KvResult<()> {
-        for tx in txs {
-            let tx_hash = tx.get_hash();
-            // Remove tx from mempool
-            remove_one_pending_tx(&self.txs_mp_db, tx_hash)?;
-            // Write tx and update sources
-            tx::write_tx(current_blockstamp, current_time, &self.gva_db, tx_hash, tx)?;
-        }
-        Ok(())
-    }
-
-    fn trim_expired_non_written_txs(&mut self, limit_time: i64) -> KvResult<()> {
-        // Get hashs of tx to remove and "times" to remove
-        let mut times = Vec::new();
-        let hashs = self
-            .txs_mp_db
-            .txs_by_recv_time()
-            .iter(..limit_time)
-            .map_ok(|(k, v)| {
-                times.push(k);
-                v.0
-            })
-            .flatten_ok()
-            .collect::<KvResult<SmallVec<[Hash; 4]>>>()?;
-        // For each tx to remove
-        for hash in hashs {
-            if remove_one_pending_tx(&self.txs_mp_db, hash)? {
-                self.txs_mp_len -= 1;
-            }
-        }
-        // Remove txs hashs in col `txs_by_recv_time`
-        for time in times {
-            self.txs_mp_db.txs_by_recv_time_write().remove(time)?;
-        }
-
-        Ok(())
-    }
-}
-
-fn remove_one_pending_tx<B: Backend>(txs_mp_db: &TxsMpV2Db<B>, tx_hash: Hash) -> KvResult<bool> {
-    if let Some(tx) = txs_mp_db.txs().get(&HashKeyV2(tx_hash))? {
-        // Remove tx hash in col `txs_by_issuer`
-        for pubkey in tx.0.issuers() {
-            let mut hashs_ = txs_mp_db
-                .txs_by_issuer()
-                .get(&PubKeyKeyV2(pubkey))?
-                .unwrap_or_default();
-            hashs_.0.remove(&tx_hash);
-            txs_mp_db
-                .txs_by_issuer_write()
-                .upsert(PubKeyKeyV2(pubkey), hashs_)?
-        }
-        // Remove tx hash in col `txs_by_recipient`
-        for pubkey in tx.0.recipients_keys() {
-            let mut hashs_ = txs_mp_db
-                .txs_by_recipient()
-                .get(&PubKeyKeyV2(pubkey))?
-                .unwrap_or_default();
-            hashs_.0.remove(&tx_hash);
-            txs_mp_db
-                .txs_by_recipient_write()
-                .upsert(PubKeyKeyV2(pubkey), hashs_)?
-        }
-        // Remove tx itself
-        txs_mp_db.txs_write().remove(HashKeyV2(tx_hash))?;
-        Ok(true)
-    } else {
-        Ok(false)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use dubp::documents::transaction::TransactionDocumentV10Stringified;
-
-    #[test]
-    #[ignore]
-    fn tmp_apply_block_real() -> KvResult<()> {
-        let (writer, _) = DbsWriter::<Sled>::new(
-            GvaV1Db::open(
-                SledConf::default()
-                    .path("/home/elois/.config/duniter/s2/data/gva_v1_sled")
-                    .flush_every_ms(None),
-            )?,
-            PublicKey::default(),
-            TxsMpV2Db::open(
-                SledConf::default()
-                    .path("/home/elois/.config/duniter/s2/data/txs_mp_v2_sled")
-                    .flush_every_ms(None),
-            )?,
-        );
-
-        let txs: Vec<TransactionDocumentV10Stringified> = serde_json::from_str(r#"[
-            {
-              "version": 10,
-              "currency": "g1",
-              "comment": ". je me sens plus legere mm si....reste le bon toit a trouver dans un temps record ! Merci pour cet eclairage fort",
-              "locktime": 0,
-              "signatures": [
-                "8t5vo+k5OvkyAd+L+J8g6MLpp/AP0qOQFcJvf+OPMEZaVnHH38YtCigo64unU9aCsb9zZc6UEc78ZrkQ/E2TCg=="
-              ],
-              "outputs": [
-                "5000:0:SIG(5VYg9YHvLQuoky7EPyyk3cEfBUtB1GuAeJ6SiJ6c9wWe)",
-                "55:0:SIG(Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x)"
-              ],
-              "inputs": [
-                "1011:0:D:Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x:296658",
-                "1011:0:D:Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x:296936",
-                "1011:0:D:Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x:297211",
-                "1011:0:D:Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x:297489",
-                "1011:0:D:Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x:297786"
-              ],
-              "unlocks": [
-                "0:SIG(0)",
-                "1:SIG(0)",
-                "2:SIG(0)",
-                "3:SIG(0)",
-                "4:SIG(0)"
-              ],
-              "blockstamp": "304284-000003F738B9A5FC8F5D04B4B9746FD899B3A49367099BB2796E7EF976DCDABB",
-              "blockstampTime": 0,
-              "issuers": [
-                "Ceq5Y6W5kjFkPrvcx5oAgugLMTwcEXyWgfn3P85TSj7x"
-              ],
-              "block_number": 0,
-              "time": 0
-            },
-            {
-              "version": 10,
-              "currency": "g1",
-              "comment": "Pour les places de cine et l expedition ..Merci",
-              "locktime": 0,
-              "signatures": [
-                "VhzwAwsCr30XnetveS74QD2kJMYCQ89VZvyUBJM9DP/kd5KBqkF1c1HcKpJdHrfu2oq3JbSEIhEf/aLgnEdSCw=="
-              ],
-              "outputs": [
-                "6000:0:SIG(jUPLL2BgY2QpheWEY3R13edV2Y4tvQMCXjJVM8PGDvyd)",
-                "10347:0:SIG(2CWxxkttvkGSUVZdaUZHiksNisDC3wJx32Y2NVAyeHez)"
-              ],
-              "inputs": [
-                "347:0:T:4EA4D01422469ABA380F48A48254EB3F15606C12FE4CFF7E7D6EEB1FD9752DDB:1",
-                "16000:0:T:9A4DA56EF5F9B50D612D806BAE0886EB3033B4F166D2E96498DE16B83F39B59D:0"
-              ],
-              "unlocks": [
-                "0:SIG(0)",
-                "1:SIG(0)"
-              ],
-              "blockstamp": "304284-000003F738B9A5FC8F5D04B4B9746FD899B3A49367099BB2796E7EF976DCDABB",
-              "blockstampTime": 0,
-              "issuers": [
-                "2CWxxkttvkGSUVZdaUZHiksNisDC3wJx32Y2NVAyeHez"
-              ],
-              "block_number": 0,
-              "time": 0
-            },
-            {
-              "version": 10,
-              "currency": "g1",
-              "comment": "POur le sac a tarte merci",
-              "locktime": 0,
-              "signatures": [
-                "721K4f+F9PgksoVDZgQTURJIO/DZUhQfAzXfBvYrFkgqHNNeBbcgGecFX63rPYjFvau+qg1Hmi0coL9z7r7EAQ=="
-              ],
-              "outputs": [
-                "15000:0:SIG(KxyNK1k55PEA8eBjX1K4dLJr35gC2dwMwNFPHwvZFH4)",
-                "17668:0:SIG(4VQvVLT1R6upLuRk85A5eWTowqJwvkSMGQQZ9Hc4bqLg)"
-              ],
-              "inputs": [
-                "1011:0:D:4VQvVLT1R6upLuRk85A5eWTowqJwvkSMGQQZ9Hc4bqLg:303924",
-                "1011:0:D:4VQvVLT1R6upLuRk85A5eWTowqJwvkSMGQQZ9Hc4bqLg:304212",
-                "10458:0:T:55113E18AB61603AD0FC24CD11ACBC96F9583FD0A5877055F17315E9613BBF7D:1",
-                "20188:0:T:937A0454C1A63B383FBB6D219B9312B0A36DFE19DA08076BD113F9D5D4FC903D:1"
-              ],
-              "unlocks": [
-                "0:SIG(0)",
-                "1:SIG(0)",
-                "2:SIG(0)",
-                "3:SIG(0)"
-              ],
-              "blockstamp": "304284-000003F738B9A5FC8F5D04B4B9746FD899B3A49367099BB2796E7EF976DCDABB",
-              "blockstampTime": 0,
-              "issuers": [
-                "4VQvVLT1R6upLuRk85A5eWTowqJwvkSMGQQZ9Hc4bqLg"
-              ],
-              "block_number": 0,
-              "time": 0
-            }
-          ]"#).expect("wrong tx");
-
-        let block = DubpBlockV10Stringified {
-            number: 304286,
-            hash: Some(
-                "000001339AECF3CAB78B2B61776FB3819B800AB43923F4F8BD0F5AE47B7DEAB9".to_owned(),
-            ),
-            median_time: 1583862823,
-            transactions: txs,
-            ..Default::default()
-        };
-
-        writer.apply_block(block)?;
-
-        Ok(())
-    }
-}
diff --git a/rust-libs/duniter-dbs-writer/src/tx.rs b/rust-libs/duniter-dbs-writer/src/tx.rs
deleted file mode 100644
index 5f1287a9b56875f680474ca15bc00028711db8c2..0000000000000000000000000000000000000000
--- a/rust-libs/duniter-dbs-writer/src/tx.rs
+++ /dev/null
@@ -1,164 +0,0 @@
-//  Copyright (C) 2020 Éloïs SANCHEZ.
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-use crate::*;
-
-pub(crate) fn write_tx<B: Backend>(
-    current_blockstamp: Blockstamp,
-    current_time: i64,
-    gva_db: &GvaV1Db<B>,
-    tx_hash: Hash,
-    tx: TransactionDocumentV10,
-) -> KvResult<()> {
-    // Insert on col `txs_by_issuer`
-    for pubkey in tx.issuers() {
-        let mut hashs = gva_db
-            .txs_by_issuer()
-            .get(&PubKeyKeyV2(pubkey))?
-            .unwrap_or_default();
-        hashs.0.insert(tx_hash);
-        gva_db
-            .txs_by_issuer_write()
-            .upsert(PubKeyKeyV2(pubkey), hashs)?;
-    }
-    // Insert on col `txs_by_recipient`
-    for pubkey in tx.recipients_keys() {
-        let mut hashs = gva_db
-            .txs_by_recipient()
-            .get(&PubKeyKeyV2(pubkey))?
-            .unwrap_or_default();
-        hashs.0.insert(tx_hash);
-        gva_db
-            .txs_by_recipient_write()
-            .upsert(PubKeyKeyV2(pubkey), hashs)?;
-    }
-    // Remove consumed UTXOs
-    for input in tx.get_inputs() {
-        // TODO ESZ remove UD sources
-        if let SourceIdV10::Utxo(utxo_id) = input.id {
-            let db_tx_origin = gva_db
-                .txs()
-                .get(&HashKeyV2::from_ref(&utxo_id.tx_hash))?
-                .ok_or_else(|| {
-                    KvError::DbCorrupted(format!("Not found origin tx of uxto {}", utxo_id))
-                })?;
-            let utxo_script = db_tx_origin.tx.get_outputs()[utxo_id.output_index]
-                .conditions
-                .script
-                .clone();
-            utxos::remove_utxo_v10(&gva_db, &utxo_script, db_tx_origin.written_time)?;
-        }
-    }
-    // Insert created UTXOs
-    for (output_index, output) in tx.get_outputs().iter().enumerate() {
-        utxos::write_utxo_v10(
-            &gva_db,
-            UtxoV10 {
-                id: UtxoIdV10 {
-                    tx_hash,
-                    output_index,
-                },
-                amount: output.amount,
-                script: output.conditions.script.clone(),
-                written_time: current_time,
-            },
-        )?;
-    }
-    // Insert tx itself
-    gva_db.txs_write().upsert(
-        HashKeyV2(tx_hash),
-        TxDbV2 {
-            tx,
-            written_block: current_blockstamp,
-            written_time: current_time,
-        },
-    )?;
-
-    Ok(())
-}
-
-pub(crate) fn revert_tx<B: Backend>(
-    gva_db: &GvaV1Db<B>,
-    tx_hash: &Hash,
-) -> KvResult<Option<TransactionDocumentV10>> {
-    if let Some(tx_db) = gva_db.txs().get(&HashKeyV2::from_ref(tx_hash))? {
-        let written_time = tx_db.written_time;
-        // Remove UTXOs created by this tx
-        use dubp::documents::transaction::TransactionDocumentTrait as _;
-        for output in tx_db.tx.get_outputs() {
-            let script = &output.conditions.script;
-            utxos::remove_utxo_v10(gva_db, script, written_time)?;
-        }
-        // Recreate UTXOs consumed by this tx
-        for input in tx_db.tx.get_inputs() {
-            // TODO ESZ recreate UD sources
-            if let SourceIdV10::Utxo(utxo_id) = input.id {
-                let db_tx_origin = gva_db
-                    .txs()
-                    .get(&HashKeyV2::from_ref(&utxo_id.tx_hash))?
-                    .ok_or_else(|| {
-                        KvError::DbCorrupted(format!("Not found origin tx of uxto {}", utxo_id))
-                    })?;
-                let utxo_script = db_tx_origin.tx.get_outputs()[utxo_id.output_index]
-                    .conditions
-                    .script
-                    .clone();
-                utxos::write_utxo_v10(
-                    gva_db,
-                    UtxoV10 {
-                        id: utxo_id,
-                        amount: input.amount,
-                        script: utxo_script,
-                        written_time: db_tx_origin.written_time,
-                    },
-                )?;
-            }
-        }
-        // Remove tx
-        remove_tx(gva_db, tx_hash, &tx_db)?;
-
-        Ok(Some(tx_db.tx))
-    } else {
-        Ok(None)
-    }
-}
-
-fn remove_tx<B: Backend>(gva_db: &GvaV1Db<B>, tx_hash: &Hash, tx_db: &TxDbV2) -> KvResult<()> {
-    // Remove tx hash in col `txs_by_issuer`
-    for pubkey in tx_db.tx.issuers() {
-        let mut hashs_ = gva_db
-            .txs_by_issuer()
-            .get(&PubKeyKeyV2(pubkey))?
-            .unwrap_or_default();
-        hashs_.0.remove(&tx_hash);
-        gva_db
-            .txs_by_issuer_write()
-            .upsert(PubKeyKeyV2(pubkey), hashs_)?
-    }
-    // Remove tx hash in col `txs_by_recipient`
-    for pubkey in tx_db.tx.recipients_keys() {
-        let mut hashs_ = gva_db
-            .txs_by_recipient()
-            .get(&PubKeyKeyV2(pubkey))?
-            .unwrap_or_default();
-        hashs_.0.remove(&tx_hash);
-        gva_db
-            .txs_by_recipient_write()
-            .upsert(PubKeyKeyV2(pubkey), hashs_)?
-    }
-    // Remove tx itself
-    gva_db.txs_write().remove(HashKeyV2(*tx_hash))?;
-    Ok(())
-}
diff --git a/rust-libs/duniter-dbs/Cargo.toml b/rust-libs/duniter-dbs/Cargo.toml
index 30ffadb8d346d4f4c3aacb0a16abdeec23e97156..1dfc931750d6b7a8d2acf52442f99247dfacc56b 100644
--- a/rust-libs/duniter-dbs/Cargo.toml
+++ b/rust-libs/duniter-dbs/Cargo.toml
@@ -32,12 +32,10 @@ tempdir = "0.3.7"
 unwrap = "1.2.1"
 
 [features]
-default = ["memory_backend", "sled_backend", "subscription", "sync"]
+default = ["sled_backend"]
 
 explorer = ["chrono", "kv_typed/explorer"]
 leveldb_backend = ["kv_typed/leveldb_backend"]
-memory_backend = ["kv_typed/memory_backend"]
-mock = ["kv_typed/mock", "mockall"]
+mem = []
+#mock = ["kv_typed/mock", "mockall"]
 sled_backend = ["kv_typed/sled_backend"]
-subscription = ["kv_typed/subscription"]
-sync = ["kv_typed/sync"]
diff --git a/rust-libs/duniter-dbs/src/lib.rs b/rust-libs/duniter-dbs/src/lib.rs
index 04b79478f73db09e644e4afd7e0051658953367f..01dd5e49e3da1c0c5dccfd87e88e05ac3783e662 100644
--- a/rust-libs/duniter-dbs/src/lib.rs
+++ b/rust-libs/duniter-dbs/src/lib.rs
@@ -24,9 +24,9 @@
 
 mod bc_v1;
 mod errors;
-mod gva_v1;
+pub mod gva_v1;
 mod keys;
-mod txs_mp_v2;
+pub mod txs_mp_v2;
 mod values;
 
 // Re-export dependencies
@@ -43,6 +43,7 @@ pub use kv_typed;
 // Prelude
 pub mod prelude {
     pub use crate::errors::ErrorDb;
+    pub use crate::{DbsBackend, DuniterDbs};
     #[cfg(feature = "explorer")]
     pub use kv_typed::explorer::{
         DbExplorable, EntryFound, ExplorerAction, ExplorerActionResponse, ValueCaptures,
@@ -65,7 +66,7 @@ pub use keys::source_key::SourceKeyV1;
 pub use keys::timestamp::TimestampKeyV1;
 pub use keys::uid::UidKeyV1;
 pub use keys::wallet_conditions::{WalletConditionsV1, WalletConditionsV2};
-pub use txs_mp_v2::{TxEvent, TxsMpV2Db, TxsMpV2DbReadable, TxsMpV2DbRo, TxsMpV2DbWritable};
+pub use txs_mp_v2::{TxsMpV2Db, TxsMpV2DbReadable, TxsMpV2DbRo, TxsMpV2DbWritable};
 pub use values::block_db::{BlockDbEnum, BlockDbV1, TransactionInBlockDbV1};
 pub use values::block_head_db::BlockHeadDbV1;
 pub use values::block_number_array_db::{BlockNumberArrayV1, BlockNumberArrayV2};
@@ -110,16 +111,15 @@ pub trait ToDumpString {
     fn to_dump_string(&self) -> String;
 }
 
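+// Default backend for the Duniter DBs: in-memory when the "mem" feature is enabled,
+// otherwise LMDB on x86_64 and Sled on other architectures.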
+#[cfg(feature = "mem")]
+pub type DbsBackend = kv_typed::backend::memory::Mem;
+#[cfg(all(not(feature = "mem"), target_arch = "x86_64"))]
+pub type DbsBackend = Lmdb;
+#[cfg(all(not(feature = "mem"), not(target_arch = "x86_64")))]
+pub type DbsBackend = Sled;
+
 #[derive(Clone, Debug)]
-pub enum DbsRo {
-    #[cfg(feature = "sled_backend")]
-    File {
-        gva_db_ro: GvaV1DbRo<Sled>,
-        txs_mp_db_ro: TxsMpV2DbRo<Sled>,
-    },
-    #[cfg(feature = "memory_backend")]
-    Mem {
-        gva_db_ro: GvaV1DbRo<Mem>,
-        txs_mp_db_ro: TxsMpV2DbRo<Mem>,
-    },
+pub struct DuniterDbs {
+    pub gva_db: GvaV1Db<DbsBackend>,
+    pub txs_mp_db: TxsMpV2Db<DbsBackend>,
 }
diff --git a/rust-libs/duniter-dbs/src/values/block_number_array_db.rs b/rust-libs/duniter-dbs/src/values/block_number_array_db.rs
index 319a20300dcef59c6d8007f93aacf912b4553b3c..20286a3e0a0be4d46cb86322db8959fdf16e815f 100644
--- a/rust-libs/duniter-dbs/src/values/block_number_array_db.rs
+++ b/rust-libs/duniter-dbs/src/values/block_number_array_db.rs
@@ -15,7 +15,7 @@
 
 use crate::*;
 
-#[derive(Debug, Deserialize, PartialEq, Serialize)]
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
 pub struct BlockNumberArrayV1(pub SmallVec<[BlockNumber; 1]>);
 
 impl ValueAsBytes for BlockNumberArrayV1 {
diff --git a/rust-libs/duniter-dbs/src/values/cindex_db.rs b/rust-libs/duniter-dbs/src/values/cindex_db.rs
index 7afc8169fd37ea16a3b98b2d459aae8d95349b21..d3d160a948d6df009c721abb11474e0262235845 100644
--- a/rust-libs/duniter-dbs/src/values/cindex_db.rs
+++ b/rust-libs/duniter-dbs/src/values/cindex_db.rs
@@ -15,7 +15,7 @@
 
 use crate::*;
 
-#[derive(Debug, Deserialize, PartialEq, Serialize)]
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
 pub struct CIndexDbV1 {
     pub received: SmallVec<[String; 10]>,
     pub issued: Vec<CIndexLineDbV1>,
diff --git a/rust-libs/duniter-dbs/src/values/iindex_db.rs b/rust-libs/duniter-dbs/src/values/iindex_db.rs
index 70a4ebc32d93225843fae3686882f1241a830f16..a56e810527395037fe11cbb60c70ccd02a424c8c 100644
--- a/rust-libs/duniter-dbs/src/values/iindex_db.rs
+++ b/rust-libs/duniter-dbs/src/values/iindex_db.rs
@@ -15,7 +15,7 @@
 
 use crate::*;
 
-#[derive(Debug, Deserialize, PartialEq, Serialize)]
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
 pub struct IIndexDbV1(pub SmallVec<[IIndexLineDbV1; 1]>);
 
 impl ValueAsBytes for IIndexDbV1 {
diff --git a/rust-libs/duniter-dbs/src/values/mindex_db.rs b/rust-libs/duniter-dbs/src/values/mindex_db.rs
index f65f856c460f061b85cac2cf085856a05ef52ce7..bee30646d6e6d742ba217a7b1a816507a697abd9 100644
--- a/rust-libs/duniter-dbs/src/values/mindex_db.rs
+++ b/rust-libs/duniter-dbs/src/values/mindex_db.rs
@@ -15,7 +15,7 @@
 
 use crate::*;
 
-#[derive(Debug, Deserialize, PartialEq, Serialize)]
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
 pub struct MIndexDbV1(pub SmallVec<[MIndexLineDbV1; 1]>);
 
 impl ValueAsBytes for MIndexDbV1 {
diff --git a/rust-libs/duniter-dbs/src/values/wallet_db.rs b/rust-libs/duniter-dbs/src/values/wallet_db.rs
index 12b7a3fa9caa3bad9cb6e55e577fbb1c1b8ded77..17dac89c5a2ca6609de612a7e5182317ef2c6d4b 100644
--- a/rust-libs/duniter-dbs/src/values/wallet_db.rs
+++ b/rust-libs/duniter-dbs/src/values/wallet_db.rs
@@ -15,7 +15,7 @@
 
 use crate::*;
 
-#[derive(Debug, Deserialize, PartialEq, Serialize)]
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
 pub struct WalletDbV1 {
     pub conditions: String,
     pub balance: u64,
diff --git a/rust-libs/duniter-dbs/tests/test_read_write.rs b/rust-libs/duniter-dbs/tests/test_read_write.rs
index 08f6b3294304a2422129a8bf46cf7139d7666d65..ee06bb03097f2a23a8f10dffbadec59b7d51ddaa 100644
--- a/rust-libs/duniter-dbs/tests/test_read_write.rs
+++ b/rust-libs/duniter-dbs/tests/test_read_write.rs
@@ -104,8 +104,14 @@ fn write_read_delete_b0_test<B: Backend>(db: &BcV1Db<B>) -> Result<()> {
         main_blocks_reader.get(&BlockNumberKeyV1(BlockNumber(1)))?,
         None
     );
-    assert_eq!(main_blocks_reader.iter(..).keys().next_res()?, None);
-    assert_eq!(main_blocks_reader.iter(..).values().next_res()?, None);
+    assert_eq!(
+        main_blocks_reader.iter(.., |iter| iter.keys().next_res())?,
+        None
+    );
+    assert_eq!(
+        main_blocks_reader.iter(.., |iter| iter.values().next_res())?,
+        None
+    );
     if let Err(TryRecvError::Empty) = events_recv.try_recv() {
     } else {
         panic!("should not receive event");
@@ -125,15 +131,22 @@ fn write_read_delete_b0_test<B: Backend>(db: &BcV1Db<B>) -> Result<()> {
         main_blocks_reader.get(&BlockNumberKeyV1(BlockNumber(1)))?,
         None
     );
-    let mut keys_iter = main_blocks_reader.iter(..).keys();
-    assert_eq!(
-        keys_iter.next_res()?,
-        Some(BlockNumberKeyV1(BlockNumber(0)))
-    );
-    assert_eq!(keys_iter.next_res()?, None);
-    let mut values_iter = main_blocks_reader.iter(..).values();
-    assert_eq!(values_iter.next_res()?, Some(b0.clone()));
-    assert_eq!(values_iter.next_res()?, None);
+    main_blocks_reader.iter(.., |iter| {
+        let mut keys_iter = iter.keys();
+        assert_eq!(
+            keys_iter.next_res()?,
+            Some(BlockNumberKeyV1(BlockNumber(0)))
+        );
+        assert_eq!(keys_iter.next_res()?, None);
+        Ok::<(), KvError>(())
+    })?;
+    main_blocks_reader.iter(.., |iter| {
+        let mut values_iter = iter.values();
+        assert_eq!(values_iter.next_res()?, Some(b0.clone()));
+        assert_eq!(values_iter.next_res()?, None);
+
+        Ok::<(), KvError>(())
+    })?;
     if let Ok(events) = events_recv.try_recv() {
         assert_eq!(events.len(), 1);
         let event = &events[0];
@@ -158,8 +171,14 @@ fn write_read_delete_b0_test<B: Backend>(db: &BcV1Db<B>) -> Result<()> {
         main_blocks_reader.get(&BlockNumberKeyV1(BlockNumber(1)))?,
         None
     );
-    assert_eq!(main_blocks_reader.iter(..).keys().next_res()?, None);
-    assert_eq!(main_blocks_reader.iter(..).values().next_res()?, None);
+    assert_eq!(
+        main_blocks_reader.iter(.., |it| it.keys().next_res())?,
+        None
+    );
+    assert_eq!(
+        main_blocks_reader.iter(.., |it| it.values().next_res())?,
+        None
+    );
     if let Ok(events) = events_recv.try_recv() {
         assert_eq!(events.len(), 1);
         let event = &events[0];
@@ -196,48 +215,75 @@ fn write_some_entries_and_iter<B: Backend>(db: &BcV1Db<B>) -> Result<()> {
 
     let uids_reader = db.uids();
     {
-        let mut values_iter_step_2 = uids_reader.iter(..).values().step_by(2);
-        assert_eq!(Some(p1), values_iter_step_2.next_res()?);
-        assert_eq!(Some(p3), values_iter_step_2.next_res()?);
-        assert_eq!(None, values_iter_step_2.next_res()?);
-
-        let mut entries_iter_step_2 = uids_reader.iter(..).step_by(2);
-        assert_eq!(Some((k1, p1)), entries_iter_step_2.next_res()?);
-        assert_eq!(Some((k3, p3)), entries_iter_step_2.next_res()?);
-        assert_eq!(None, entries_iter_step_2.next_res()?);
-
-        let mut entries_iter = uids_reader.iter(k2..);
-        assert_eq!(Some((k2, p2)), entries_iter.next_res()?);
-        assert_eq!(Some((k3, p3)), entries_iter.next_res()?);
-        assert_eq!(None, entries_iter.next_res()?);
-
-        let mut entries_iter = uids_reader.iter(..=k2);
-        assert_eq!(Some((k1, p1)), entries_iter.next_res()?);
-        assert_eq!(Some((k2, p2)), entries_iter.next_res()?);
-        assert_eq!(None, entries_iter.next_res()?);
-
-        let mut entries_iter_rev = uids_reader.iter(k2..).reverse();
-        assert_eq!(Some((k3, p3)), entries_iter_rev.next_res()?);
-        assert_eq!(Some((k2, p2)), entries_iter_rev.next_res()?);
-        assert_eq!(None, entries_iter_rev.next_res()?);
-
-        let mut entries_iter_rev = uids_reader.iter(..=k2).reverse();
-        assert_eq!(Some((k2, p2)), entries_iter_rev.next_res()?);
-        assert_eq!(Some((k1, p1)), entries_iter_rev.next_res()?);
-        assert_eq!(None, entries_iter.next_res()?);
-
-        let mut keys_iter_rev = uids_reader.iter(..=k2).keys().reverse();
-        assert_eq!(Some(k2), keys_iter_rev.next_res()?);
-        assert_eq!(Some(k1), keys_iter_rev.next_res()?);
-        assert_eq!(None, keys_iter_rev.next_res()?);
+        uids_reader.iter(.., |it| {
+            let mut values_iter_step_2 = it.values().step_by(2);
+
+            assert_eq!(Some(p1), values_iter_step_2.next_res()?);
+            assert_eq!(Some(p3), values_iter_step_2.next_res()?);
+            assert_eq!(None, values_iter_step_2.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
+
+        uids_reader.iter(.., |it| {
+            let mut entries_iter_step_2 = it.step_by(2);
+
+            assert_eq!(Some((k1, p1)), entries_iter_step_2.next_res()?);
+            assert_eq!(Some((k3, p3)), entries_iter_step_2.next_res()?);
+            assert_eq!(None, entries_iter_step_2.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
+
+        uids_reader.iter(k2.., |mut entries_iter| {
+            assert_eq!(Some((k2, p2)), entries_iter.next_res()?);
+            assert_eq!(Some((k3, p3)), entries_iter.next_res()?);
+            assert_eq!(None, entries_iter.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
+
+        uids_reader.iter(..=k2, |mut entries_iter| {
+            assert_eq!(Some((k1, p1)), entries_iter.next_res()?);
+            assert_eq!(Some((k2, p2)), entries_iter.next_res()?);
+            assert_eq!(None, entries_iter.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
+
+        uids_reader.iter(k2.., |entries_iter| {
+            let mut entries_iter_rev = entries_iter.reverse();
+
+            assert_eq!(Some((k3, p3)), entries_iter_rev.next_res()?);
+            assert_eq!(Some((k2, p2)), entries_iter_rev.next_res()?);
+            assert_eq!(None, entries_iter_rev.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
+
+        uids_reader.iter(..=k2, |entries_iter| {
+            let mut entries_iter_rev = entries_iter.reverse();
+
+            assert_eq!(Some((k2, p2)), entries_iter_rev.next_res()?);
+            assert_eq!(Some((k1, p1)), entries_iter_rev.next_res()?);
+            assert_eq!(None, entries_iter_rev.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
+
+        uids_reader.iter(..=k2, |it| {
+            let mut keys_iter_rev = it.keys().reverse();
+
+            assert_eq!(Some(k2), keys_iter_rev.next_res()?);
+            assert_eq!(Some(k1), keys_iter_rev.next_res()?);
+            assert_eq!(None, keys_iter_rev.next_res()?);
+            Ok::<(), KvError>(())
+        })?;
     }
 
     uids_writer.remove(k3)?;
 
-    let mut keys_iter_rev = uids_reader.iter(..).keys();
-    assert_eq!(Some(k1), keys_iter_rev.next_res()?);
-    assert_eq!(Some(k2), keys_iter_rev.next_res()?);
-    assert_eq!(None, keys_iter_rev.next_res()?);
+    uids_reader.iter(.., |it| {
+        let mut keys_iter = it.keys();
+
+        assert_eq!(Some(k1), keys_iter.next_res()?);
+        assert_eq!(Some(k2), keys_iter.next_res()?);
+        assert_eq!(None, keys_iter.next_res()?);
+        Ok::<(), KvError>(())
+    })?;
 
     Ok(())
 }
@@ -260,8 +306,14 @@ fn batch_test<B: Backend>(db: &BcV1Db<B>) -> Result<()> {
         main_blocks_reader.get(&BlockNumberKeyV1(BlockNumber(1)))?,
         None
     );
-    assert_eq!(main_blocks_reader.iter(..).keys().next_res()?, None);
-    assert_eq!(main_blocks_reader.iter(..).values().next_res()?, None);
+    assert_eq!(
+        main_blocks_reader.iter(.., |it| it.keys().next_res())?,
+        None
+    );
+    assert_eq!(
+        main_blocks_reader.iter(.., |it| it.values().next_res())?,
+        None
+    );
     if let Err(TryRecvError::Empty) = events_recv.try_recv() {
     } else {
         panic!("should not receive event");
@@ -276,7 +328,7 @@ fn batch_test<B: Backend>(db: &BcV1Db<B>) -> Result<()> {
     // b0 should be written in the batch
     assert_eq!(
         batch.main_blocks().get(&BlockNumberKeyV1(BlockNumber(0))),
-        Some(&b0)
+        BatchGet::Updated(&b0)
     );
 
     // b0 should not be written in the db
@@ -309,20 +361,28 @@ fn batch_test<B: Backend>(db: &BcV1Db<B>) -> Result<()> {
             .as_ref(),
         Some(&b0)
     );
-    let mut keys_iter = db.main_blocks().iter(..).keys();
-    assert_eq!(
-        keys_iter.next_res()?,
-        Some(BlockNumberKeyV1(BlockNumber(0)))
-    );
-    assert_eq!(
-        keys_iter.next_res()?,
-        Some(BlockNumberKeyV1(BlockNumber(1)))
-    );
-    assert_eq!(keys_iter.next_res()?, None);
-    let mut values_iter = db.main_blocks().iter(..).values();
-    assert_eq!(values_iter.next_res()?.as_ref(), Some(&b0));
-    assert_eq!(values_iter.next_res()?.as_ref(), Some(&b1));
-    assert_eq!(values_iter.next_res()?, None);
+    db.main_blocks().iter(.., |it| {
+        let mut keys_iter = it.keys();
+
+        assert_eq!(
+            keys_iter.next_res()?,
+            Some(BlockNumberKeyV1(BlockNumber(0)))
+        );
+        assert_eq!(
+            keys_iter.next_res()?,
+            Some(BlockNumberKeyV1(BlockNumber(1)))
+        );
+        assert_eq!(keys_iter.next_res()?, None);
+        Ok::<(), KvError>(())
+    })?;
+    db.main_blocks().iter(.., |it| {
+        let mut values_iter = it.values();
+
+        assert_eq!(values_iter.next_res()?.as_ref(), Some(&b0));
+        assert_eq!(values_iter.next_res()?.as_ref(), Some(&b1));
+        assert_eq!(values_iter.next_res()?, None);
+        Ok::<(), KvError>(())
+    })?;
     if let Ok(events) = events_recv.try_recv() {
         assert_eq!(events.len(), 2);
         assert!(assert_eq_pairs(
diff --git a/rust-libs/duniter-dbs/tests/test_tmp_real.rs b/rust-libs/duniter-dbs/tests/test_tmp_real.rs
index 0d675549b4bbedb279a85e63c619b8942d5f55d7..563a71d032f52f128803a6a85415135eaf6beea8 100644
--- a/rust-libs/duniter-dbs/tests/test_tmp_real.rs
+++ b/rust-libs/duniter-dbs/tests/test_tmp_real.rs
@@ -13,10 +13,10 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program.  If not, see <https://www.gnu.org/licenses/>.
 
-use dubp::common::crypto::bases::b58::ToBase58 as _;
-use dubp::common::crypto::hashs::Hash;
-use dubp::common::crypto::keys::PublicKey;
-use dubp::common::prelude::*;
+/*use dubp_common::crypto::bases::b58::ToBase58 as _;
+use dubp_common::crypto::hashs::Hash;
+use dubp_common::crypto::keys::PublicKey;
+use dubp_common::prelude::*;
 use duniter_dbs::kv_typed::prelude::*;
 use duniter_dbs::*;
 use duniter_dbs::{
@@ -764,4 +764,4 @@ fn db_v1_sindex_conditions_on() -> Result<()> {
     }*/
 
     Ok(())
-}
+}*/
diff --git a/rust-libs/duniter-gva/Cargo.toml b/rust-libs/duniter-gva/Cargo.toml
index 2aee20edc443967e20850cc108ad65e76038e8d2..973c4db8d14f1f0bdb3c3cdda8946e51bf4e470b 100644
--- a/rust-libs/duniter-gva/Cargo.toml
+++ b/rust-libs/duniter-gva/Cargo.toml
@@ -11,7 +11,8 @@ async-graphql = "2.0.0"
 dubp = { version = "0.29.0" }
 duniter-dbs = { path = "../duniter-dbs" }
 duniter-dbs-read-ops = { path = "../duniter-dbs-read-ops" }
-duniter-dbs-writer = { path = "../duniter-dbs-writer" }
+duniter-mempools = { path = "../duniter-mempools" }
+fast-threadpool = "0.1.1"
 flume = "0.9.1"
 futures = "0.3.6"
 http = "0.2.1"
@@ -22,4 +23,5 @@ tokio = { version = "0.2.22", features = ["io-util", "rt-threaded", "stream"] }
 warp = "0.2"
 
 [dev-dependencies]
+duniter-dbs = { path = "../duniter-dbs", features = ["mem"] }
 unwrap = "1.2.1"
diff --git a/rust-libs/duniter-gva/src/lib.rs b/rust-libs/duniter-gva/src/lib.rs
index 01723b82092465e9bd72451d2a468b034296c73f..07d6247c46e252b46c739d1bb577e59e5e94aafb 100644
--- a/rust-libs/duniter-gva/src/lib.rs
+++ b/rust-libs/duniter-gva/src/lib.rs
@@ -34,8 +34,9 @@ use dubp::common::crypto::keys::{ed25519::PublicKey, PublicKey as _};
 use dubp::documents::prelude::*;
 use dubp::documents::transaction::{TransactionDocumentTrait, TransactionDocumentV10};
 use dubp::documents_parser::prelude::*;
-use duniter_dbs::{kv_typed::prelude::*, DbsRo, TxDbV2, TxsMpV2DbReadable};
-use duniter_dbs_writer::GvaWriter;
+use duniter_dbs::prelude::*;
+use duniter_dbs::{kv_typed::prelude::*, TxDbV2, TxsMpV2DbReadable};
+use duniter_mempools::TxsMempool;
 use futures::{StreamExt, TryStreamExt};
 use schema::GraphQlSchema;
 use std::convert::Infallible;
@@ -84,9 +85,11 @@ pub struct GvaServer;
 impl GvaServer {
     pub fn start(
         conf: GvaConf,
-        dbs_ro: DbsRo,
+        dbs: DuniterDbs,
+        dbs_pool: fast_threadpool::ThreadPoolAsyncHandler<DuniterDbs>,
+        server_pubkey: PublicKey,
         software_version: &'static str,
-        writer: GvaWriter,
+        txs_mempool: TxsMempool,
     ) -> Result<(), tokio::io::Error> {
         println!("TMP GvaServer::start: conf={:?}", conf);
         let mut runtime = tokio::runtime::Builder::new()
@@ -101,9 +104,11 @@ impl GvaServer {
                     schema::Subscription::default(),
                 )
                 .data(schema::SchemaData {
-                    dbs_ro,
+                    dbs,
+                    dbs_pool,
+                    server_pubkey,
                     software_version,
-                    writer,
+                    txs_mempool,
                 })
                 .extension(async_graphql::extensions::Logger)
                 .finish();
@@ -167,22 +172,26 @@ mod tests {
     use super::*;
     use duniter_dbs::kv_typed::backend::memory::{Mem, MemConf};
     use duniter_dbs::{GvaV1Db, GvaV1DbWritable, TxsMpV2Db, TxsMpV2DbWritable};
+    use fast_threadpool::ThreadPoolConfig;
     use unwrap::unwrap;
 
     #[test]
     #[ignore]
     fn launch_mem_gva() {
-        let gva_db_ro = unwrap!(GvaV1Db::<Mem>::open(MemConf::default())).get_ro_handler();
-        let txs_mp_db_ro = unwrap!(TxsMpV2Db::<Mem>::open(MemConf::default())).get_ro_handler();
+        let dbs = DuniterDbs {
+            gva_db: unwrap!(GvaV1Db::<Mem>::open(MemConf::default())),
+            txs_mp_db: unwrap!(TxsMpV2Db::<Mem>::open(MemConf::default())),
+        };
+        let threadpool =
+            fast_threadpool::ThreadPool::start(ThreadPoolConfig::default(), dbs.clone());
 
         unwrap!(GvaServer::start(
             GvaConf::default(),
-            DbsRo::Mem {
-                gva_db_ro,
-                txs_mp_db_ro,
-            },
+            dbs,
+            threadpool.into_async_handler(),
+            PublicKey::default(),
             "test",
-            GvaWriter::mock()
+            TxsMempool::new(10)
         ));
 
         std::thread::sleep(std::time::Duration::from_secs(120));
diff --git a/rust-libs/duniter-gva/src/resolvers/txs_history.rs b/rust-libs/duniter-gva/src/resolvers/txs_history.rs
index 96b2c0b388c47d5bbf3076b369327411bd925532..cfd9305ee895ffe93f560557e60b5f9d91f05b91 100644
--- a/rust-libs/duniter-gva/src/resolvers/txs_history.rs
+++ b/rust-libs/duniter-gva/src/resolvers/txs_history.rs
@@ -29,24 +29,16 @@ impl TxsHistoryQuery {
 
         let data = ctx.data::<SchemaData>()?;
 
-        let txs_history = match &data.dbs_ro {
-            DbsRo::File {
-                gva_db_ro,
-                txs_mp_db_ro,
-            } => duniter_dbs_read_ops::txs_history::get_transactions_history(
-                gva_db_ro,
-                txs_mp_db_ro,
-                pubkey,
-            )?,
-            DbsRo::Mem {
-                gva_db_ro,
-                txs_mp_db_ro,
-            } => duniter_dbs_read_ops::txs_history::get_transactions_history(
-                gva_db_ro,
-                txs_mp_db_ro,
-                pubkey,
-            )?,
-        };
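+        // Fetch the transactions history through the DBs thread pool.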
+        let txs_history = data
+            .dbs_pool
+            .execute(move |dbs| {
+                duniter_dbs_read_ops::txs_history::get_transactions_history(
+                    &dbs.gva_db,
+                    &dbs.txs_mp_db,
+                    pubkey,
+                )
+            })
+            .await??;
 
         Ok(TxsHistoryGva {
             sent: txs_history
diff --git a/rust-libs/duniter-gva/src/resolvers/utxos.rs b/rust-libs/duniter-gva/src/resolvers/utxos.rs
index ff480c8e35f43512dbb6e64caf41825eb232f2fa..a88256729db093def61f2de72db4ad9a65d36ded 100644
--- a/rust-libs/duniter-gva/src/resolvers/utxos.rs
+++ b/rust-libs/duniter-gva/src/resolvers/utxos.rs
@@ -34,14 +34,10 @@ impl UtxosQuery {
 
         let data = ctx.data::<SchemaData>()?;
 
-        let utxos = match &data.dbs_ro {
-            DbsRo::File { gva_db_ro, .. } => {
-                duniter_dbs_read_ops::utxos::get_script_utxos(gva_db_ro, &script)?
-            }
-            DbsRo::Mem { gva_db_ro, .. } => {
-                duniter_dbs_read_ops::utxos::get_script_utxos(gva_db_ro, &script)?
-            }
-        };
+        let utxos = data
+            .dbs_pool
+            .execute(move |dbs| duniter_dbs_read_ops::utxos::get_script_utxos(&dbs.gva_db, &script))
+            .await??;
 
         let utxos: Vec<UtxoGva> = utxos
             .into_iter()
diff --git a/rust-libs/duniter-gva/src/schema.rs b/rust-libs/duniter-gva/src/schema.rs
index 4b89a35127f844dd53b8df3731fa0ba20746f1de..722fb4d10410b3d5738ac95235d34924bc37480d 100644
--- a/rust-libs/duniter-gva/src/schema.rs
+++ b/rust-libs/duniter-gva/src/schema.rs
@@ -17,9 +17,11 @@ use crate::*;
 
 pub(crate) type GraphQlSchema = async_graphql::Schema<Query, Mutation, Subscription>;
 pub(crate) struct SchemaData {
-    pub(crate) dbs_ro: DbsRo,
+    pub(crate) dbs: DuniterDbs,
+    pub(crate) dbs_pool: fast_threadpool::ThreadPoolAsyncHandler<DuniterDbs>,
+    pub(crate) server_pubkey: PublicKey,
     pub(crate) software_version: &'static str,
-    pub(crate) writer: GvaWriter,
+    pub(crate) txs_mempool: TxsMempool,
 }
 
 #[derive(async_graphql::MergedObject, Default)]
@@ -42,19 +44,16 @@ impl Subscription {
 
         let (s, r) = flume::unbounded();
 
-        match &data.dbs_ro {
-            DbsRo::File { txs_mp_db_ro, .. } => {
-                txs_mp_db_ro.txs().subscribe(s).expect("fail to access db")
-            }
-            DbsRo::Mem { txs_mp_db_ro, .. } => {
-                txs_mp_db_ro.txs().subscribe(s).expect("fail to access db")
-            }
-        }
+        data.dbs
+            .txs_mp_db
+            .txs()
+            .subscribe(s)
+            .expect("fail to access db");
 
         r.into_stream().filter_map(|events| {
             let mut txs = Vec::new();
             for event in events.deref() {
-                if let duniter_dbs::TxEvent::Upsert {
+                if let duniter_dbs::txs_mp_v2::TxEvent::Upsert {
                     value: ref pending_tx,
                     ..
                 } = event
@@ -77,48 +76,40 @@ pub struct Mutation;
 #[async_graphql::Object]
 impl Mutation {
     /// Process a transaction
-    /// Return false if the mempool is full
+    /// Return the transaction if it was successfully inserted
     async fn tx(
         &self,
         ctx: &async_graphql::Context<'_>,
         raw_tx: String,
-    ) -> async_graphql::Result<bool> {
+    ) -> async_graphql::Result<TxGva> {
         let tx = TransactionDocumentV10::parse_from_raw_text(&raw_tx)?;
 
         tx.verify(None)?;
 
         let data = ctx.data::<SchemaData>()?;
 
-        let tx_already_exist = match &data.dbs_ro {
-            DbsRo::File { gva_db_ro, .. } => {
-                duniter_dbs_read_ops::txs_history::tx_exist(gva_db_ro, tx.get_hash())?
-            }
-            DbsRo::Mem { gva_db_ro, .. } => {
-                duniter_dbs_read_ops::txs_history::tx_exist(gva_db_ro, tx.get_hash())?
-            }
-        };
-
-        if tx_already_exist {
-            Err(async_graphql::Error::new(
-                "Transaction already written in blockchain",
-            ))
-        } else {
-            Ok(data
-                .writer
-                .add_pending_tx(tx)
-                .recv_async()
-                .await
-                .expect("dbs-writer disconnected")?)
-        }
+        let server_pubkey = data.server_pubkey;
+        let txs_mempool = data.txs_mempool;
+
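+        // Run the mempool insertion on the DBs thread pool; the transaction is returned on success.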
+        let tx = data
+            .dbs_pool
+            .execute(move |dbs| {
+                txs_mempool
+                    .add_pending_tx(&dbs.gva_db, server_pubkey, &dbs.txs_mp_db, &tx)
+                    .map(|()| tx)
+            })
+            .await??;
+
+        Ok(TxGva::from(&tx))
     }
 
     /// Process several transactions
-    /// Return the numbers of transactions successfully inserted on mempool
+    /// Return an array of successfully inserted transactions
     async fn txs(
         &self,
         ctx: &async_graphql::Context<'_>,
         raw_txs: Vec<String>,
-    ) -> async_graphql::Result<u32> {
+    ) -> async_graphql::Result<Vec<TxGva>> {
         let txs = raw_txs
             .iter()
             .map(|raw_tx| TransactionDocumentV10::parse_from_raw_text(&raw_tx))
@@ -126,36 +117,23 @@ impl Mutation {
 
         let data = ctx.data::<SchemaData>()?;
 
-        for tx in &txs {
-            tx.verify(None)?;
-            if match &data.dbs_ro {
-                DbsRo::File { gva_db_ro, .. } => {
-                    duniter_dbs_read_ops::txs_history::tx_exist(gva_db_ro, tx.get_hash())?
-                }
-                DbsRo::Mem { gva_db_ro, .. } => {
-                    duniter_dbs_read_ops::txs_history::tx_exist(gva_db_ro, tx.get_hash())?
-                }
-            } {
-                return Err(async_graphql::Error::new(
-                    "Transaction already written in blockchain",
-                ));
-            }
-        }
+        let server_pubkey = data.server_pubkey;
+        let txs_mempool = data.txs_mempool;
 
-        let mut count = 0;
+        let mut processed_txs = Vec::with_capacity(txs.len());
         for tx in txs {
-            if data
-                .writer
-                .add_pending_tx(tx)
-                .recv_async()
-                .await
-                .expect("dbs-writer disconnected")?
-            {
-                count += 1;
-            } else {
-                return Ok(count);
-            }
+            tx.verify(None)?;
+            let tx = data
+                .dbs_pool
+                .execute(move |dbs| {
+                    txs_mempool
+                        .add_pending_tx(&dbs.gva_db, server_pubkey, &dbs.txs_mp_db, &tx)
+                        .map(|()| tx)
+                })
+                .await??;
+            processed_txs.push(TxGva::from(&tx));
         }
-        Ok(count)
+
+        Ok(processed_txs)
     }
 }
diff --git a/rust-libs/duniter-mempools/Cargo.toml b/rust-libs/duniter-mempools/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..7831fe948fe73e4a8964d130c427ec4b5321fcc9
--- /dev/null
+++ b/rust-libs/duniter-mempools/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "duniter-mempools"
+version = "0.1.0"
+authors = ["elois <elois@duniter.org>"]
+description = "Duniter mempools"
+repository = "https://git.duniter.org/nodes/typescript/duniter"
+keywords = ["dubp", "duniter", "blockchain", "mempool"]
+license = "AGPL-3.0"
+edition = "2018"
+
+[lib]
+path = "src/lib.rs"
+
+[dependencies]
+dubp = { version = "0.29.0" }
+duniter-dbs = { path = "../duniter-dbs" }
+duniter-dbs-read-ops = { path = "../duniter-dbs-read-ops" }
+duniter-dbs-write-ops = { path = "../duniter-dbs-write-ops" }
+log = "0.4.11"
+thiserror = "1.0.20"
+
+[dev-dependencies]
diff --git a/rust-libs/duniter-mempools/src/lib.rs b/rust-libs/duniter-mempools/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f6e4b426cbbc0131a9dcad9c05b93f23907051b3
--- /dev/null
+++ b/rust-libs/duniter-mempools/src/lib.rs
@@ -0,0 +1,121 @@
+//  Copyright (C) 2020 Éloïs SANCHEZ.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+#![deny(
+    clippy::unwrap_used,
+    missing_copy_implementations,
+    trivial_casts,
+    trivial_numeric_casts,
+    unstable_features,
+    unused_import_braces
+)]
+
+use std::borrow::Cow;
+
+use dubp::common::crypto::keys::ed25519::PublicKey;
+use dubp::documents::prelude::*;
+use dubp::documents::transaction::TransactionDocumentV10;
+use duniter_dbs::kv_typed::prelude::*;
+use duniter_dbs::{GvaV1DbReadable, TxsMpV2Db, TxsMpV2DbReadable};
+use thiserror::Error;
+
+#[derive(Debug, Error)]
+pub enum TxMpError {
+    #[error("{0}")]
+    Db(KvError),
+    #[error("Mempool full")]
+    Full,
+    #[error("Transaction already written in blockchain")]
+    TxAlreadyWritten,
+}
+
+impl From<KvError> for TxMpError {
+    fn from(e: KvError) -> Self {
+        TxMpError::Db(e)
+    }
+}
+
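+/// Mempool of pending transactions, bounded by `max_size`.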
+#[derive(Clone, Copy, Debug)]
+pub struct TxsMempool {
+    max_size: usize,
+}
+
+impl TxsMempool {
+    pub fn new(max_size: usize) -> Self {
+        TxsMempool { max_size }
+    }
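+    /// Check whether a new transaction may enter the mempool: it must not already be
+    /// written in the blockchain, and the mempool must have room (the size limit does
+    /// not apply to the server's own transactions).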
+    pub fn accept_new_tx<GvaDb: GvaV1DbReadable, TxsMpDb: TxsMpV2DbReadable>(
+        &self,
+        gva_db_ro: &GvaDb,
+        server_pubkey: PublicKey,
+        tx: TransactionDocumentV10,
+        txs_mp_db_ro: &TxsMpDb,
+    ) -> Result<(), TxMpError> {
+        if duniter_dbs_read_ops::txs_history::tx_exist(gva_db_ro, tx.get_hash())? {
+            Err(TxMpError::TxAlreadyWritten)
+        } else if tx.issuers().contains(&server_pubkey)
+            || txs_mp_db_ro.txs().count()? < self.max_size
+        {
+            Ok(())
+        } else {
+            Err(TxMpError::Full)
+        }
+    }
+
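+    /// Insert a pending transaction, rejecting it if it is already written in the
+    /// blockchain or if the mempool is full (the server's own transactions bypass
+    /// the size limit).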
+    pub fn add_pending_tx<B: Backend, GvaDb: GvaV1DbReadable>(
+        &self,
+        gva_db_ro: &GvaDb,
+        server_pubkey: PublicKey,
+        txs_mp_db: &TxsMpV2Db<B>,
+        tx: &TransactionDocumentV10,
+    ) -> Result<(), TxMpError> {
+        if duniter_dbs_read_ops::txs_history::tx_exist(gva_db_ro, tx.get_hash())? {
+            Err(TxMpError::TxAlreadyWritten)
+        } else if tx.issuers().contains(&server_pubkey) {
+            duniter_dbs_write_ops::add_pending_tx(|_, _| Ok(()), txs_mp_db, Cow::Borrowed(tx))?;
+            Ok(())
+        } else {
+            duniter_dbs_write_ops::add_pending_tx(
+                |_tx, txs| {
+                    if txs.count()? >= self.max_size {
+                        Err(KvError::Custom(TxMpError::Full.into()))
+                    } else {
+                        Ok(())
+                    }
+                },
+                txs_mp_db,
+                Cow::Borrowed(tx),
+            )?;
+            Ok(())
+        }
+    }
+
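+    /// Insert a pending transaction without any check (used by the Duniter JS part only).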
+    #[doc(hidden)]
+    pub fn add_pending_tx_force<B: Backend>(
+        &self,
+        txs_mp_db: &TxsMpV2Db<B>,
+        tx: &TransactionDocumentV10,
+    ) -> KvResult<()> {
+        duniter_dbs_write_ops::add_pending_tx(|_, _| Ok(()), txs_mp_db, Cow::Borrowed(tx))?;
+        Ok(())
+    }
+
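+    /// Number of free slots remaining in the mempool.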
+    pub fn get_free_rooms<TxsMpDb: TxsMpV2DbReadable>(
+        &self,
+        txs_mp_db_ro: &TxsMpDb,
+    ) -> KvResult<usize> {
+        Ok(self.max_size.saturating_sub(txs_mp_db_ro.txs().count()?))
+    }
+}
diff --git a/rust-libs/duniter-server/Cargo.toml b/rust-libs/duniter-server/Cargo.toml
index 84a1dfdbdde4a52a8a6ce5be45883a787501d972..358937e224382b3c1163f19ab257d83248e929f5 100644
--- a/rust-libs/duniter-server/Cargo.toml
+++ b/rust-libs/duniter-server/Cargo.toml
@@ -9,11 +9,14 @@ edition = "2018"
 dubp = { version = "0.29.0" }
 duniter-dbs = { path = "../duniter-dbs" }
 duniter-dbs-read-ops = { path = "../duniter-dbs-read-ops" }
-duniter-dbs-writer = { path = "../duniter-dbs-writer" }
+duniter-dbs-write-ops = { path = "../duniter-dbs-write-ops" }
 duniter-gva = { path = "../duniter-gva" }
+duniter-mempools = { path = "../duniter-mempools" }
+fast-threadpool = "0.1.1"
 flume = "0.9.1"
 log = "0.4.11"
 resiter = "0.4.0"
+rand = "0.7.3"
 
 [dev-dependencies]
 unwrap = "1.2.1"
diff --git a/rust-libs/duniter-server/src/conf.rs b/rust-libs/duniter-server/src/conf.rs
index d790361e453b894c7014554e7325f84b113dcd91..63ab05afd4131da648a2bd16d9bac1832930dd92 100644
--- a/rust-libs/duniter-server/src/conf.rs
+++ b/rust-libs/duniter-server/src/conf.rs
@@ -22,13 +22,16 @@ pub struct DuniterServerConf {
     pub txs_mempool_size: usize,
 }
 
-pub fn open_dbs<B: BackendConf>(home_path_opt: Option<&Path>) -> (GvaV1Db<B>, TxsMpV2Db<B>) {
-    (
-        GvaV1Db::<B>::open(B::gen_backend_conf("gva_v1", home_path_opt))
+pub fn open_dbs(home_path_opt: Option<&Path>) -> DuniterDbs {
+    DuniterDbs {
+        gva_db: GvaV1Db::<DbsBackend>::open(DbsBackend::gen_backend_conf("gva_v1", home_path_opt))
             .expect("fail to open GVA DB"),
-        TxsMpV2Db::<B>::open(B::gen_backend_conf("txs_mp_v2", home_path_opt))
-            .expect("fail to open TxsMp DB"),
-    )
+        txs_mp_db: TxsMpV2Db::<DbsBackend>::open(DbsBackend::gen_backend_conf(
+            "txs_mp_v2",
+            home_path_opt,
+        ))
+        .expect("fail to open TxsMp DB"),
+    }
 }
 
 pub trait BackendConf: Backend {
@@ -45,10 +48,27 @@ impl BackendConf for Mem {
     }
 }
 
+impl BackendConf for Lmdb {
+    #[inline(always)]
+    fn gen_backend_conf(db_name: &'static str, home_path_opt: Option<&Path>) -> LmdbConf {
+        let conf = LmdbConf::default();
+        if let Some(data_path) = home_path_opt {
+            conf.folder_path(data_path.join(format!("data/{}_lmdb", db_name)))
+        } else {
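+            // No home path: use a unique tmpfs folder and mark it temporary so it is removed on drop.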
+            let random = rand::random::<u128>();
+            conf.folder_path(PathBuf::from(format!(
+                "/dev/shm/duniter/_{}/{}_lmdb",
+                random, db_name
+            )))
+            .temporary(true)
+        }
+    }
+}
+
 impl BackendConf for Sled {
     #[inline(always)]
-    fn gen_backend_conf(db_name: &'static str, home_path_opt: Option<&Path>) -> Config {
-        let conf = Config::default().flush_every_ms(Some(10_000));
+    fn gen_backend_conf(db_name: &'static str, home_path_opt: Option<&Path>) -> SledConf {
+        let conf = SledConf::default().flush_every_ms(Some(10_000));
         if let Some(data_path) = home_path_opt {
             conf.path(data_path.join(format!("data/{}_sled", db_name)))
         } else {
diff --git a/rust-libs/duniter-server/src/lib.rs b/rust-libs/duniter-server/src/lib.rs
index 3b2319ee32a0e3d34dcac8c7281ea286fff27143..16b5b844701bb62d47e29d46e66f7756ea8e8242 100644
--- a/rust-libs/duniter-server/src/lib.rs
+++ b/rust-libs/duniter-server/src/lib.rs
@@ -25,9 +25,10 @@
 mod conf;
 
 pub use duniter_dbs::smallvec;
+use duniter_mempools::{TxMpError, TxsMempool};
+use fast_threadpool::ThreadPoolConfig;
 
 pub use crate::conf::{BackendConf, DuniterServerConf};
-pub use duniter_dbs::TxEvent;
 pub use duniter_gva::GvaConf;
 
 use dubp::block::DubpBlockV10Stringified;
@@ -35,43 +36,26 @@ use dubp::common::crypto::hashs::Hash;
 use dubp::common::crypto::keys::ed25519::PublicKey;
 use dubp::common::prelude::*;
 use dubp::documents::{prelude::*, transaction::TransactionDocumentV10};
+use duniter_dbs::prelude::*;
 use duniter_dbs::{
-    kv_typed::backend::sled::{Config, Sled},
-    kv_typed::prelude::Arc,
+    kv_typed::backend::memory::{Mem, MemConf},
+    kv_typed::backend::sled::Sled,
     kv_typed::prelude::*,
-    //BlockNumberArrayV2, BlockNumberKeyV2, SourceAmountValV2, UtxosOfScriptV1
-    DbsRo,
-    GvaV1Db,
-    GvaV1DbReadable,
-    GvaV1DbWritable,
-    HashKeyV2,
-    PendingTxDbV2,
-    TxsMpV2Db,
-    TxsMpV2DbReadable,
-    TxsMpV2DbWritable,
+    GvaV1Db, GvaV1DbReadable, GvaV1DbWritable, HashKeyV2, PendingTxDbV2, TxsMpV2Db,
+    TxsMpV2DbReadable, TxsMpV2DbWritable,
 };
 use duniter_dbs_read_ops::txs_history::TxsHistory;
-use duniter_dbs_writer::{DbsWriter, DbsWriterMsg};
-use flume::Receiver;
 use resiter::filter::Filter;
-use std::{path::Path, thread::JoinHandle};
+use std::{
+    collections::BTreeMap,
+    path::{Path, PathBuf},
+};
 
 pub struct DuniterServer {
     conf: DuniterServerConf,
-    dbs_ro: DbsRo,
-    writer_sender: flume::Sender<DbsWriterMsg>,
-    writer_thread: Option<JoinHandle<()>>,
-}
-
-pub type TxsMpSubscriber = flume::Receiver<Arc<Events<TxEvent>>>;
-
-impl Drop for DuniterServer {
-    fn drop(&mut self) {
-        let _ = self.writer_sender.send(DbsWriterMsg::Stop);
-        if let Some(writer_thread) = self.writer_thread.take() {
-            let _ = writer_thread.join();
-        }
-    }
+    dbs_pool: fast_threadpool::ThreadPoolSyncHandler<DuniterDbs>,
+    pending_txs_subscriber: flume::Receiver<Arc<Events<duniter_dbs::txs_mp_v2::TxEvent>>>,
+    txs_mempool: TxsMempool,
 }
 
 impl DuniterServer {
@@ -79,14 +63,20 @@ impl DuniterServer {
         conf: DuniterServerConf,
         home_path_opt: Option<&Path>,
         software_version: &'static str,
-    ) -> (Self, TxsMpSubscriber) {
-        if home_path_opt.is_some() {
-            let (gva_db, txs_mp_db) = conf::open_dbs::<Sled>(home_path_opt);
-            let gva_db_ro = gva_db.get_ro_handler();
-            let txs_mp_db_ro = txs_mp_db.get_ro_handler();
+    ) -> Self {
+        let txs_mempool = TxsMempool::new(conf.txs_mempool_size);
+
+        let dbs = conf::open_dbs(home_path_opt);
+
+        let (s, pending_txs_subscriber) = flume::unbounded();
+        dbs.txs_mp_db
+            .txs()
+            .subscribe(s)
+            .expect("Fail to subscribe to txs col");
 
-            let (writer, writer_sender) = DbsWriter::new(gva_db, conf.server_pubkey, txs_mp_db);
-            let writer_thread = std::thread::spawn(move || writer.main_loop());
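+        // Every DB access below goes through this threadpool, which holds its own handle to the DBs.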
+        let threadpool = if home_path_opt.is_some() {
+            let threadpool =
+                fast_threadpool::ThreadPool::start(ThreadPoolConfig::default(), dbs.clone());
 
             if let Some(mut gva_conf) = conf.gva.clone() {
                 if let Some(remote_path) = std::env::var_os("DUNITER_GVA_REMOTE_PATH") {
@@ -98,61 +88,24 @@ impl DuniterServer {
                 }
                 duniter_gva::GvaServer::start(
                     gva_conf,
-                    DbsRo::File {
-                        gva_db_ro: gva_db_ro.clone(),
-                        txs_mp_db_ro: txs_mp_db_ro.clone(),
-                    },
+                    dbs,
+                    threadpool.async_handler(),
+                    conf.server_pubkey,
                     software_version,
-                    duniter_dbs_writer::GvaWriter::new(
-                        conf.txs_mempool_size,
-                        writer_sender.clone(),
-                    ),
+                    txs_mempool,
                 )
                 .expect("Fail to start GVA server");
             }
-
-            let (s, txs_mp_subscriber) = flume::unbounded();
-            txs_mp_db_ro
-                .txs()
-                .subscribe(s)
-                .expect("fail to subscribe to tx mempool");
-
-            (
-                DuniterServer {
-                    conf,
-                    dbs_ro: DbsRo::File {
-                        gva_db_ro,
-                        txs_mp_db_ro,
-                    },
-                    writer_sender,
-                    writer_thread: Some(writer_thread),
-                },
-                txs_mp_subscriber,
-            )
+            threadpool
         } else {
-            let (gva_db, txs_mp_db) = conf::open_dbs::<Mem>(home_path_opt);
-            let (s, txs_mp_subscriber) = flume::unbounded();
-            txs_mp_db
-                .txs()
-                .subscribe(s)
-                .expect("fail to subscribe to tx mempool");
-            let dbs_ro = DbsRo::Mem {
-                gva_db_ro: gva_db.get_ro_handler(),
-                txs_mp_db_ro: txs_mp_db.get_ro_handler(),
-            };
-
-            let (writer, writer_sender) = DbsWriter::new(gva_db, conf.server_pubkey, txs_mp_db);
-            let writer_thread = std::thread::spawn(move || writer.main_loop());
+            fast_threadpool::ThreadPool::start(ThreadPoolConfig::low(), dbs)
+        };
 
-            (
-                DuniterServer {
-                    conf,
-                    dbs_ro,
-                    writer_sender,
-                    writer_thread: Some(writer_thread),
-                },
-                txs_mp_subscriber,
-            )
+        DuniterServer {
+            conf,
+            dbs_pool: threadpool.into_sync_handler(),
+            pending_txs_subscriber,
+            txs_mempool,
         }
     }
 
@@ -164,168 +117,145 @@ impl DuniterServer {
         tx: TransactionDocumentV10,
         server_pubkey: PublicKey,
     ) -> KvResult<bool> {
-        if tx.issuers().contains(&server_pubkey) {
-            Ok(true)
-        } else {
-            let (s, r) = flume::bounded(0);
-            let _ = self.writer_sender.send(DbsWriterMsg::GetTxsMpLen(s));
-            let tx_mp_len = r.recv().expect("dbs writer disconnected");
-            Ok(tx_mp_len < self.conf.txs_mempool_size)
+        let txs_mempool = self.txs_mempool;
+        match self
+            .dbs_pool
+            .execute(move |dbs| {
+                txs_mempool.accept_new_tx(&dbs.gva_db, server_pubkey, tx, &dbs.txs_mp_db)
+            })
+            .expect("dbs pool discorrected")
+        {
+            Ok(()) => Ok(true),
+            Err(TxMpError::Db(e)) => Err(e),
+            Err(_) => Ok(false),
         }
     }
-    pub fn get_mempool_txs_free_rooms(&self) -> usize {
-        let (s, r) = flume::bounded(0);
-        let _ = self.writer_sender.send(DbsWriterMsg::GetTxsMpLen(s));
-        self.conf.txs_mempool_size - r.recv().expect("dbs writer disconnected")
+    pub fn get_mempool_txs_free_rooms(&self) -> KvResult<usize> {
+        let txs_mempool = self.txs_mempool;
+        self.dbs_pool
+            .execute(move |dbs| txs_mempool.get_free_rooms(&dbs.txs_mp_db))
+            .expect("dbs pool discorrected")
     }
     pub fn get_new_pending_txs(&self) -> KvResult<Vec<TransactionDocumentV10>> {
-        let (s, r) = flume::bounded(0);
-        let _ = self.writer_sender.send(DbsWriterMsg::GetNewPendingTxs(s));
-        let new_pending_txs = r.recv().expect("dbs writer disconnected");
-        Ok(new_pending_txs)
+        let mut new_pending_txs = BTreeMap::new();
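+        // Drain the events received since the last call, keeping only the txs that are still pending.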
+        for events in self.pending_txs_subscriber.drain() {
+            use std::ops::Deref as _;
+            for event in events.deref() {
+                match event {
+                    duniter_dbs::txs_mp_v2::TxEvent::Upsert { key, value } => {
+                        new_pending_txs.insert(key.0, value.0.clone());
+                    }
+                    duniter_dbs::txs_mp_v2::TxEvent::Remove { key } => {
+                        new_pending_txs.remove(&key.0);
+                    }
+                    _ => (),
+                }
+            }
+        }
+        Ok(new_pending_txs.into_iter().map(|(_k, v)| v).collect())
     }
     pub fn get_pending_txs(
         &self,
         _blockchain_time: i64,
         min_version: usize,
     ) -> KvResult<Vec<PendingTxDbV2>> {
-        match &self.dbs_ro {
-            DbsRo::File { txs_mp_db_ro, .. } => txs_mp_db_ro
-                .txs()
-                .iter(..)
-                .values()
-                .filter_ok(|tx| tx.0.version() >= min_version)
-                .collect(),
-            DbsRo::Mem { txs_mp_db_ro, .. } => txs_mp_db_ro
-                .txs()
-                .iter(..)
-                .values()
-                .filter_ok(|tx| tx.0.version() >= min_version)
-                .collect(),
-        }
+        self.dbs_pool
+            .execute(move |dbs| {
+                dbs.txs_mp_db.txs().iter(.., |it| {
+                    it.values()
+                        .filter_ok(|tx| tx.0.version() >= min_version)
+                        .collect()
+                })
+            })
+            .expect("dbs pool disconnected")
     }
 
     pub fn get_transactions_history(&self, pubkey: PublicKey) -> KvResult<TxsHistory> {
-        match &self.dbs_ro {
-            DbsRo::File {
-                gva_db_ro,
-                txs_mp_db_ro,
-            } => duniter_dbs_read_ops::txs_history::get_transactions_history(
-                gva_db_ro,
-                txs_mp_db_ro,
-                pubkey,
-            ),
-            DbsRo::Mem {
-                gva_db_ro,
-                txs_mp_db_ro,
-            } => duniter_dbs_read_ops::txs_history::get_transactions_history(
-                gva_db_ro,
-                txs_mp_db_ro,
-                pubkey,
-            ),
-        }
+        self.dbs_pool
+            .execute(move |dbs| {
+                duniter_dbs_read_ops::txs_history::get_transactions_history(
+                    &dbs.gva_db,
+                    &dbs.txs_mp_db,
+                    pubkey,
+                )
+            })
+            .expect("dbs pool disconnected")
     }
 
     pub fn get_tx_by_hash(
         &self,
         hash: Hash,
     ) -> KvResult<Option<(TransactionDocumentV10, Option<BlockNumber>)>> {
-        match &self.dbs_ro {
-            DbsRo::File {
-                gva_db_ro,
-                txs_mp_db_ro,
-            } => {
-                if let Some(tx) = txs_mp_db_ro.txs().get(&HashKeyV2(hash))? {
+        self.dbs_pool
+            .execute(move |dbs| {
+                if let Some(tx) = dbs.txs_mp_db.txs().get(&HashKeyV2(hash))? {
                     Ok(Some((tx.0, None)))
-                } else if let Some(tx_db) = gva_db_ro.txs().get(&HashKeyV2(hash))? {
+                } else if let Some(tx_db) = dbs.gva_db.txs().get(&HashKeyV2(hash))? {
                     Ok(Some((tx_db.tx, Some(tx_db.written_block.number))))
                 } else {
                     Ok(None)
                 }
-            }
-            DbsRo::Mem {
-                gva_db_ro,
-                txs_mp_db_ro,
-            } => {
-                if let Some(tx) = txs_mp_db_ro.txs().get(&HashKeyV2(hash))? {
-                    Ok(Some((tx.0, None)))
-                } else if let Some(tx_db) = gva_db_ro.txs().get(&HashKeyV2(hash))? {
-                    Ok(Some((tx_db.tx, Some(tx_db.written_block.number))))
-                } else {
-                    Ok(None)
-                }
-            }
-        }
+            })
+            .expect("dbs pool disconnected")
     }
 
     /*
-     * WRITE FUNCTION FOR GVA (AND DUNITER JS via WS2P and BMA)
+     * WRITE FUNCTIONS FOR DUNITER JS ONLY
      */
-    // force : false for GVA and true for DUNITER JS
-    pub fn add_pending_tx(
-        &self,
-        tx: TransactionDocumentV10,
-        force: bool,
-    ) -> Receiver<KvResult<bool>> {
-        let max_tx_mp_size_opt = if force {
-            None
-        } else {
-            Some(self.conf.txs_mempool_size)
-        };
-        let (sender, receiver) = flume::bounded(0);
-        let _ = self.writer_sender.send(DbsWriterMsg::AddPendingTx {
-            tx,
-            max_tx_mp_size_opt,
-            sender,
-        });
-        receiver
+    pub fn add_pending_tx_force(&self, tx: TransactionDocumentV10) -> KvResult<()> {
+        let txs_mempool = self.txs_mempool;
+        self.dbs_pool
+            .execute(move |dbs| txs_mempool.add_pending_tx_force(&dbs.txs_mp_db, &tx))
+            .expect("dbs pool disconnected")
     }
 
-    /*
-     * WRITE FUNCTIONS FOR DUNITER JS ONLY
-     */
     pub fn remove_all_pending_txs(&self) -> KvResult<()> {
-        let (s, r) = flume::bounded(0);
-        let _ = self
-            .writer_sender
-            .send(DbsWriterMsg::RemoveAllPendingTxs(s));
-        r.recv().expect("dbs writer disconnected")
+        self.dbs_pool
+            .execute(move |dbs| duniter_dbs_write_ops::remove_all_pending_txs(&dbs.txs_mp_db))
+            .expect("dbs pool disconnected")
     }
     pub fn remove_pending_tx_by_hash(&self, hash: Hash) -> KvResult<()> {
-        let (s, r) = flume::bounded(0);
-        let _ = self
-            .writer_sender
-            .send(DbsWriterMsg::RemovePendingTxByHash(hash, s));
-        r.recv().expect("dbs writer disconnected")
+        self.dbs_pool
+            .execute(move |dbs| {
+                duniter_dbs_write_ops::remove_pending_tx_by_hash(&dbs.txs_mp_db, hash)
+            })
+            .expect("dbs pool disconnected")
     }
     pub fn revert_block(&self, block: DubpBlockV10Stringified) -> KvResult<()> {
-        let (sender, r) = flume::bounded(0);
-        let _ = self
-            .writer_sender
-            .send(DbsWriterMsg::RevertBlock { block, sender });
-        r.recv().expect("dbs writer disconnected")
+        let gva = self.conf.gva.is_some();
+        self.dbs_pool
+            .execute(move |dbs| {
+                duniter_dbs_write_ops::revert_block(&dbs.gva_db, &dbs.txs_mp_db, block, gva)
+            })
+            .expect("dbs pool disconnected")
     }
     pub fn apply_block(&self, block: DubpBlockV10Stringified) -> KvResult<()> {
-        let (sender, r) = flume::bounded(0);
-        let _ = self
-            .writer_sender
-            .send(DbsWriterMsg::ApplyBlock { block, sender });
-        r.recv().expect("dbs writer disconnected")
+        let gva = self.conf.gva.is_some();
+        self.dbs_pool
+            .execute(move |dbs| {
+                duniter_dbs_write_ops::apply_block(&dbs.gva_db, &dbs.txs_mp_db, block, gva)
+            })
+            .expect("dbs pool disconnected")
     }
     pub fn apply_chunk_of_blocks(&self, blocks: Vec<DubpBlockV10Stringified>) -> KvResult<()> {
-        let (sender, r) = flume::bounded(0);
-        let _ = self
-            .writer_sender
-            .send(DbsWriterMsg::ApplyChunkOfBlocks { blocks, sender });
-        r.recv()
-            .expect("apply_chunk_of_blocks: dbs writer disconnected")
+        let gva = self.conf.gva.is_some();
+        self.dbs_pool
+            .execute(move |dbs| {
+                duniter_dbs_write_ops::apply_chunk_of_blocks(
+                    &dbs.gva_db,
+                    &dbs.txs_mp_db,
+                    blocks,
+                    gva,
+                )
+            })
+            .expect("dbs pool disconnected")
     }
     pub fn trim_expired_non_written_txs(&self, limit_time: i64) -> KvResult<()> {
-        let (sender, r) = flume::bounded(0);
-        let _ = self
-            .writer_sender
-            .send(DbsWriterMsg::TrimExpiredNonWrittenTxs { limit_time, sender });
-        r.recv().expect("dbs writer disconnected")
+        self.dbs_pool
+            .execute(move |dbs| {
+                duniter_dbs_write_ops::trim_expired_non_written_txs(&dbs.txs_mp_db, limit_time)
+            })
+            .expect("dbs pool disconnected")
     }
 }
 
@@ -336,8 +266,8 @@ mod tests {
     use dubp::documents::transaction::TransactionDocumentV10Builder;
 
     #[test]
-    fn test_txs_history() {
-        let (server, _) = DuniterServer::start(
+    fn test_txs_history() -> KvResult<()> {
+        let server = DuniterServer::start(
             DuniterServerConf {
                 gva: None,
                 server_pubkey: PublicKey::default(),
@@ -359,17 +289,17 @@ mod tests {
             hash: None,
         }
         .build_with_signature(smallvec![]);
-        server
-            .add_pending_tx(tx.clone(), true)
-            .recv()
-            .expect("server disconnected")
-            .expect("fail to add pending tx");
+        server.add_pending_tx_force(tx.clone())?;
 
-        let txs_history = server
-            .get_transactions_history(PublicKey::default())
-            .expect("fail to get txs history");
+        let txs_history = server.get_transactions_history(PublicKey::default())?;
 
         tx.get_hash();
-        assert_eq!(txs_history.sending, vec![tx])
+        assert_eq!(txs_history.sending, vec![tx]);
+
+        server.remove_all_pending_txs()?;
+
+        assert_eq!(server.get_pending_txs(0, 0)?.len(), 0);
+
+        Ok(())
     }
 }
diff --git a/rust-libs/tools/kv_typed/Cargo.toml b/rust-libs/tools/kv_typed/Cargo.toml
index 6f7b52371860d64d0f2ad7d436b943c97d616749..68cb443dde41454b4efa46598198a5e1eaf69da6 100644
--- a/rust-libs/tools/kv_typed/Cargo.toml
+++ b/rust-libs/tools/kv_typed/Cargo.toml
@@ -12,7 +12,6 @@ edition = "2018"
 path = "src/lib.rs"
 
 [dependencies]
-blake3 = { version = "0.3.7", optional = true }
 cfg-if = "0.1.10"
 flume = "0.9.1"
 kv_typed_code_gen = { path = "../kv_typed_code_gen" }
@@ -35,7 +34,7 @@ lmdb-zero = "0.4.4"
 [[bench]]
 name = "compare_backends"
 harness = false
-required-features = ["leveldb_backend", "memory_backend", "sled_backend"]
+required-features = ["leveldb_backend", "sled_backend"]
 
 [dev-dependencies]
 async-std = { version = "1.6.3", features = ["attributes"] }
@@ -48,16 +47,14 @@ unwrap = "1.2.1"
 criterion = { version = "0.3.1" }
 
 [features]
-#default = ["memory_backend"]
+default = ["sled_backend"]
 
 async = []
 explorer = ["rayon", "regex", "serde_json"]
 leveldb_backend = ["leveldb_minimal"]
-memory_backend = ["blake3"]
 sled_backend = ["sled"]
 
 #mock = ["mockall"]
 
-default = ["memory_backend", "sled_backend"]
-#default = ["memory_backend", "explorer"]
-#default = ["memory_backend",  "mock"]
+#default = ["explorer"]
+#default = ["mock"]
diff --git a/rust-libs/tools/kv_typed/src/backend.rs b/rust-libs/tools/kv_typed/src/backend.rs
index 81af043ea130184169c2f14a36ae2d0264727498..d6b9eefa7f814d4eb51802d7906ebf2c7b56fcce 100644
--- a/rust-libs/tools/kv_typed/src/backend.rs
+++ b/rust-libs/tools/kv_typed/src/backend.rs
@@ -19,7 +19,6 @@
 pub mod leveldb;
 #[cfg(target_arch = "x86_64")]
 pub mod lmdb;
-#[cfg(feature = "memory_backend")]
 pub mod memory;
 #[cfg(feature = "mock")]
 pub mod mock;
diff --git a/rust-libs/tools/kv_typed/src/backend/lmdb.rs b/rust-libs/tools/kv_typed/src/backend/lmdb.rs
index 64dcd1f8e42801111c055141b3289298b1eb3de7..1190fd5502cfa7800b8d4de9d3727b4cfb1e1e4c 100644
--- a/rust-libs/tools/kv_typed/src/backend/lmdb.rs
+++ b/rust-libs/tools/kv_typed/src/backend/lmdb.rs
@@ -27,16 +27,28 @@ use std::path::PathBuf;
 /// 2. If you are in an asynchronous context, an async task should never yield while it holds an instantiated iterator.
 pub struct Lmdb;
 
-#[derive(Clone, Debug, Default)]
+#[derive(Clone, Debug)]
 pub struct LmdbConf {
     folder_path: PathBuf,
+    temporary: bool,
+}
+impl Default for LmdbConf {
+    fn default() -> Self {
+        LmdbConf {
+            folder_path: PathBuf::default(),
+            temporary: false,
+        }
+    }
 }
-
 impl LmdbConf {
     pub fn folder_path(mut self, folder_path: PathBuf) -> Self {
         self.folder_path = folder_path;
         self
     }
+    pub fn temporary(mut self, temporary: bool) -> Self {
+        self.temporary = temporary;
+        self
+    }
 }
 
 impl Backend for Lmdb {
@@ -54,6 +66,11 @@ impl Backend for Lmdb {
         if !exist {
             std::fs::create_dir(path.as_path())?;
         }
+        let path_to_remove = if conf.temporary {
+            Some(path.clone())
+        } else {
+            None
+        };
         let path = path
             .into_os_string()
             .into_string()
@@ -70,12 +87,26 @@ impl Backend for Lmdb {
         let env =
             std::sync::Arc::new(unsafe { lmdb::EnvBuilder::new()?.open(&path, env_flags, 0o600)? });
         let tree = std::sync::Arc::new(lmdb::Database::open(env.clone(), None, &col_options)?);
-        Ok(LmdbCol(LmdbColInner { env, tree }))
+        Ok(LmdbCol {
+            inner: LmdbColInner { env, tree },
+            path_to_remove,
+        })
     }
 }
 
 #[derive(Clone, Debug)]
-pub struct LmdbCol(LmdbColInner);
+pub struct LmdbCol {
+    inner: LmdbColInner,
+    path_to_remove: Option<PathBuf>,
+}
+
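+// A temporary column removes its folder when the column is dropped.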
+impl Drop for LmdbCol {
+    fn drop(&mut self) {
+        if let Some(ref path) = self.path_to_remove {
+            let _ = std::fs::remove_dir_all(path);
+        }
+    }
+}
 
 #[derive(Clone, Debug)]
 struct LmdbColInner {
@@ -233,11 +264,11 @@ impl BackendCol for LmdbCol {
     type Iter = LmdbIter;
 
     fn get<K: Key, V: Value>(&self, k: &K) -> KvResult<Option<V>> {
-        let tx = lmdb::ReadTransaction::new(self.0.tree.env())?;
+        let tx = lmdb::ReadTransaction::new(self.inner.tree.env())?;
         let access = tx.access();
         k.as_bytes(|k_bytes| {
             access
-                .get(&self.0.tree, k_bytes)
+                .get(&self.inner.tree, k_bytes)
                 .to_opt()?
                 .map(|bytes| {
                     V::from_bytes(&bytes).map_err(|e| KvError::DeserError(format!("{}", e)))
@@ -252,10 +283,10 @@ impl BackendCol for LmdbCol {
         f: F,
     ) -> KvResult<Option<D>> {
         k.as_bytes(|k_bytes| {
-            let tx = lmdb::ReadTransaction::new(self.0.tree.env())?;
+            let tx = lmdb::ReadTransaction::new(self.inner.tree.env())?;
             let access = tx.access();
             access
-                .get::<_, [u8]>(&self.0.tree, k_bytes)
+                .get::<_, [u8]>(&self.inner.tree, k_bytes)
                 .to_opt()?
                 .map(|bytes| {
                     if let Some(layout_verified) = zerocopy::LayoutVerified::<_, V::Ref>::new(bytes)
@@ -277,10 +308,10 @@ impl BackendCol for LmdbCol {
         f: F,
     ) -> KvResult<Option<D>> {
         k.as_bytes(|k_bytes| {
-            let tx = lmdb::ReadTransaction::new(self.0.tree.env())?;
+            let tx = lmdb::ReadTransaction::new(self.inner.tree.env())?;
             let access = tx.access();
             access
-                .get::<_, [u8]>(&self.0.tree, k_bytes)
+                .get::<_, [u8]>(&self.inner.tree, k_bytes)
                 .to_opt()?
                 .map(|bytes| {
                     if let Some(layout_verified) =
@@ -300,30 +331,35 @@ impl BackendCol for LmdbCol {
     }
 
     fn clear(&mut self) -> KvResult<()> {
-        let tx = lmdb::WriteTransaction::new(self.0.tree.env())?;
+        let tx = lmdb::WriteTransaction::new(self.inner.tree.env())?;
         {
             let mut access = tx.access();
-            access.clear_db(&self.0.tree)?;
+            access.clear_db(&self.inner.tree)?;
         }
         tx.commit()?;
         Ok(())
     }
 
     fn count(&self) -> KvResult<usize> {
-        let tx = lmdb::ReadTransaction::new(self.0.tree.env())?;
-        Ok(tx.db_stat(&self.0.tree)?.entries)
+        let tx = lmdb::ReadTransaction::new(self.inner.tree.env())?;
+        Ok(tx.db_stat(&self.inner.tree)?.entries)
     }
 
     fn iter<K: Key, V: Value>(&self, _range: RangeBytes) -> Self::Iter {
-        LmdbIter::new(self.0.env.clone(), self.0.tree.clone())
+        LmdbIter::new(self.inner.env.clone(), self.inner.tree.clone())
     }
 
     fn put<K: Key, V: Value>(&mut self, k: &K, value: &V) -> KvResult<()> {
         value.as_bytes(|v_bytes| {
-            let tx = lmdb::WriteTransaction::new(self.0.tree.env())?;
+            let tx = lmdb::WriteTransaction::new(self.inner.tree.env())?;
             k.as_bytes(|k_bytes| {
                 let mut access = tx.access();
-                access.put(&self.0.tree, k_bytes, v_bytes, lmdb::put::Flags::empty())
+                access.put(
+                    &self.inner.tree,
+                    k_bytes,
+                    v_bytes,
+                    lmdb::put::Flags::empty(),
+                )
             })?;
             tx.commit()?;
             Ok(())
@@ -331,10 +367,10 @@ impl BackendCol for LmdbCol {
     }
 
     fn delete<K: Key>(&mut self, k: &K) -> KvResult<()> {
-        let tx = lmdb::WriteTransaction::new(self.0.tree.env())?;
+        let tx = lmdb::WriteTransaction::new(self.inner.tree.env())?;
         k.as_bytes(|k_bytes| {
             let mut access = tx.access();
-            access.del_key(&self.0.tree, k_bytes).to_opt()
+            access.del_key(&self.inner.tree, k_bytes).to_opt()
         })?;
         tx.commit()?;
         Ok(())
@@ -345,19 +381,19 @@ impl BackendCol for LmdbCol {
     }
 
     fn write_batch(&mut self, inner_batch: Self::Batch) -> KvResult<()> {
-        let tx = lmdb::WriteTransaction::new(self.0.tree.env())?;
+        let tx = lmdb::WriteTransaction::new(self.inner.tree.env())?;
         {
             let mut access = tx.access();
             for (k, v) in inner_batch.upsert_ops {
                 access.put(
-                    &self.0.tree,
+                    &self.inner.tree,
                     k.as_ref(),
                     v.as_ref(),
                     lmdb::put::Flags::empty(),
                 )?;
             }
             for k in inner_batch.remove_ops {
-                access.del_key(&self.0.tree, k.as_ref()).to_opt()?;
+                access.del_key(&self.inner.tree, k.as_ref()).to_opt()?;
             }
         }
         tx.commit()?;
@@ -365,6 +401,6 @@ impl BackendCol for LmdbCol {
     }
 
     fn save(&self) -> KvResult<()> {
-        Ok(self.0.tree.env().sync(true)?)
+        Ok(self.inner.tree.env().sync(true)?)
     }
 }
diff --git a/rust-libs/tools/kv_typed/src/backend/memory.rs b/rust-libs/tools/kv_typed/src/backend/memory.rs
index d5e57e487e202087b491960b3bb10edda96cab50..ccc0124809dd7f35dca0939f79333844b493a914 100644
--- a/rust-libs/tools/kv_typed/src/backend/memory.rs
+++ b/rust-libs/tools/kv_typed/src/backend/memory.rs
@@ -17,7 +17,7 @@
 
 use crate::*;
 use std::collections::BTreeMap;
-use uninit::extension_traits::VecCapacity as _;
+//use uninit::extension_traits::VecCapacity as _;
 
 #[derive(Clone, Copy, Debug)]
 pub struct Mem;
@@ -39,15 +39,15 @@ impl Backend for Mem {
     fn open(_conf: &Self::Conf) -> KvResult<Self> {
         Ok(Mem)
     }
-    fn open_col(&mut self, conf: &Self::Conf, col_name: &str) -> KvResult<Self::Col> {
-        if let Some(ref folder_path) = conf.folder_path {
+    fn open_col(&mut self, _conf: &Self::Conf, _col_name: &str) -> KvResult<Self::Col> {
+        /*if let Some(ref folder_path) = conf.folder_path {
             MemCol::from_file(folder_path.join(col_name))
-        } else {
-            Ok(MemCol {
-                path: None,
-                tree: BTreeMap::new(),
-            })
-        }
+        } else {*/
+        Ok(MemCol {
+            path: None,
+            tree: BTreeMap::new(),
+        })
+        //}
     }
 }
 
@@ -73,7 +73,7 @@ pub struct MemCol {
     tree: Tree,
 }
 
-impl MemCol {
+/*impl MemCol {
     fn from_file(file_path: std::path::PathBuf) -> KvResult<Self> {
         let mut file = std::fs::File::open(file_path.as_path())?;
         let bytes = Vec::<u8>::new();
@@ -219,7 +219,7 @@ impl MemCol {
 
         bytes
     }
-}
+}*/
 
 impl BackendCol for MemCol {
     type Batch = MemBatch;
@@ -334,7 +334,7 @@ impl BackendCol for MemCol {
     }
     #[inline(always)]
     fn save(&self) -> KvResult<()> {
-        if let Some(ref file_path) = self.path {
+        /*if let Some(ref file_path) = self.path {
             let bytes = Self::tree_to_bytes(&self.tree);
 
             let mut file =
@@ -342,7 +342,7 @@ impl BackendCol for MemCol {
             use std::io::Write as _;
             file.write_all(&bytes[..])
                 .map_err(|e| KvError::BackendError(e.into()))?;
-        }
+        }*/
 
         Ok(())
     }
@@ -395,7 +395,7 @@ impl BackendIter<IVec, IVec> for MemIter {}
 
 #[cfg(test)]
 mod tests {
-    use super::*;
+    /*use super::*;
 
     #[test]
     fn test_save() -> KvResult<()> {
@@ -426,5 +426,5 @@ mod tests {
         assert_eq!(tree2.get(&k4), Some(&v4));
 
         Ok(())
-    }
+    }*/
 }
diff --git a/rust-libs/tools/kv_typed/src/lib.rs b/rust-libs/tools/kv_typed/src/lib.rs
index 21800d9eed521ba7d8d993a2d3ef5905539e49c6..0a4ac7072fb38511a62d44bd211d2b0293c27dbe 100644
--- a/rust-libs/tools/kv_typed/src/lib.rs
+++ b/rust-libs/tools/kv_typed/src/lib.rs
@@ -59,7 +59,6 @@ pub mod prelude {
     pub use crate::backend::leveldb::{LevelDb, LevelDbConf};
     #[cfg(target_arch = "x86_64")]
     pub use crate::backend::lmdb::{Lmdb, LmdbConf};
-    #[cfg(feature = "memory_backend")]
     pub use crate::backend::memory::{Mem, MemConf};
     #[cfg(feature = "mock")]
     pub use crate::backend::mock::{MockBackend, MockBackendCol, MockBackendIter};
diff --git a/rust-libs/tools/kv_typed/tests/db_schema.rs b/rust-libs/tools/kv_typed/tests/db_schema.rs
index 4b635aab689a0c0ed647c846c5045b823116d5bd..d0d49b745d7a354bd047cadc0a3556342b861ca1 100644
--- a/rust-libs/tools/kv_typed/tests/db_schema.rs
+++ b/rust-libs/tools/kv_typed/tests/db_schema.rs
@@ -44,7 +44,9 @@ mod tests {
 
     #[test]
     fn test_db_schema_mem() -> KvResult<()> {
-        let db = TestV1Db::<Mem>::open(MemConf::default())?;
+        let db = TestV1Db::<kv_typed::backend::memory::Mem>::open(
+            kv_typed::backend::memory::MemConf::default(),
+        )?;
 
         test_db_schema(&db)
     }
diff --git a/server.ts b/server.ts
index 36b04969a4547f5be0620d0eb39ed0f3bcf97e6d..46fcc19d01dde9e8256770bbb4bcd7d30f777daa 100644
--- a/server.ts
+++ b/server.ts
@@ -45,7 +45,7 @@ import {LevelUp} from "levelup";
 import {BMAConstants} from "./app/modules/bma/lib/constants"
 import {HttpMilestonePage} from "./app/modules/bma/lib/dtos"
 import * as toJson from "./app/modules/bma/lib/tojson"
-import { rawTxParseAndVerify, RustEventEmitter, txVerify } from "./neon/lib"
+import { rawTxParseAndVerify, txVerify } from "./neon/lib"
 import { TransactionDTOV10 } from "./neon/native"
 
 export interface HookableServer {
@@ -85,7 +85,6 @@ export class Server extends stream.Duplex implements HookableServer {
   keyPair:any
   sign:any
   blockchain:any
-  rustEventEmitter: RustEventEmitter | null = null;
 
   MerkleService:(req:any, merkle:any, valueCoroutine:any) => any
   IdentityService:IdentityService
@@ -367,17 +366,6 @@ export class Server extends stream.Duplex implements HookableServer {
       }
     }
 
-    // Listen rust events
-    //console.log("TMP: Create RustEventEmitter()");
-    this.rustEventEmitter = new RustEventEmitter();
-    this.rustEventEmitter.on('txs', ({ txs }) => {
-      //console.log("TMP: receive txs from rust !");
-      txs.map((tx: TransactionDTOV10) => this.emitDocument(
-        TransactionDTO.fromTransactionDTOV10(tx), 
-        DuniterDocument.ENTITY_TRANSACTION)
-      );
-    });
-
     setInterval(() => {
       if (this.ws2pCluster) {
         let txs = this.dal.getNewPendingTxs();
diff --git a/test/dal/basic-dal-tests.ts b/test/dal/basic-dal-tests.ts
index 90b37f94792eeed33730cc6d3c30ffd4eaaf4b34..8fc3836d2d9c1668d5a4b47a3c6b46ddfdbc0b97 100644
--- a/test/dal/basic-dal-tests.ts
+++ b/test/dal/basic-dal-tests.ts
@@ -17,6 +17,7 @@ import {Directory} from "../../app/lib/system/directory"
 import {DBBlock} from "../../app/lib/db/DBBlock"
 import {Underscore} from "../../app/lib/common-libs/underscore"
 import { ConfDTO } from "../../app/lib/dto/ConfDTO"
+import { BlockDTO } from "../../app/lib/dto/BlockDTO"
 
 var should = require('should');
 var assert = require('assert');
@@ -33,6 +34,10 @@ var mocks = {
     ]
   },
   block0: {
+    "issuersCount": 0,
+    "issuersFrame": 0,
+    "issuersFrameVar": 0,
+    "fork": false,
     "hash" : "00063EB6E83F8717CEF1D25B3E2EE30800063EB6E83F8717CEF1D25B3E2EE308",
     "signature" : "+78w7251vvRdhoIJ6IWHEiEOLxNrmfQf45Y5sYvPdnAdXkVpO1unMV5YA/G5Vhphyz1dICrbeKCPM5qbFsoWAQ==",
     "version" : constants.BLOCK_GENESIS_VERSION,
@@ -152,7 +157,7 @@ describe("DAL", function(){
   });
 
   it('should be able to save a Block', async () => {
-    await fileDAL.saveBlock(Underscore.extend({ fork: false } as any, mocks.block0), ConfDTO.mock());
+    await fileDAL.saveBlock(DBBlock.fromBlockDTO(BlockDTO.fromJSONObject(mocks.block0)), ConfDTO.mock());
     let block = (await fileDAL.getFullBlockOf(0)) as DBBlock
     block.should.have.property('hash').equal(mocks.block0.hash);
     block.should.have.property('signature').equal(mocks.block0.signature);
@@ -164,7 +169,6 @@ describe("DAL", function(){
     block.should.have.property('previousIssuer').equal(mocks.block0.previousIssuer);
     block.should.have.property('membersCount').equal(mocks.block0.membersCount);
     block.should.have.property('monetaryMass').equal(mocks.block0.monetaryMass);
-    block.should.have.property('UDTime').equal(mocks.block0.UDTime);
     block.should.have.property('medianTime').equal(mocks.block0.medianTime);
     block.should.have.property('dividend').equal(mocks.block0.dividend);
     block.should.have.property('unitbase').equal(mocks.block0.unitbase);
diff --git a/test/integration/misc/cli.ts b/test/integration/misc/cli.ts
index 9f142e4b321f4eb7faabce5a0c65327867009d56..c14855731829a26c2197c9fc5823994db8dcb024 100644
--- a/test/integration/misc/cli.ts
+++ b/test/integration/misc/cli.ts
@@ -123,14 +123,6 @@ describe("CLI", function() {
     // const res = await execute(['export-bc', '--nostdout']);
     // res.slice(0, 1).should.have.length(0);
   })
-
-  it('sync 7 blocks (fast)', async () => {
-    // await execute(['reset', 'data']);
-    await execute(['sync', fakeServer.host + ':' + String(fakeServer.port), '--nocautious', '--nointeractive', '--noshuffle', '--localsync', '7']);
-    const res = await execute(['export-bc', '--nostdout']);
-    res[res.length - 1].should.have.property('number').equal(7);
-    res.should.have.length(7 + 1); // blocks #0..#7
-  })
 });
 
 /**