diff --git a/Cargo.lock b/Cargo.lock
index 8e35fc861776771821ebacd5f337cf572e7f35e4..a883de680d9046ab9a18bb8695501e736947e82f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -526,6 +526,7 @@ dependencies = [
 name = "durs-blockchain-dal"
 version = "0.3.0-dev"
 dependencies = [
+ "bincode 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "dubp-block-doc 0.1.0",
  "dubp-common-doc 0.1.0",
  "dubp-currency-params 0.2.0",
@@ -534,21 +535,36 @@ dependencies = [
  "dubp-user-docs-tests-tools 0.1.0",
  "dup-crypto 0.7.0",
  "dup-crypto-tests-tools 0.1.0",
+ "durs-common-dal 0.1.0-a",
  "durs-common-tests-tools 0.1.0",
  "durs-common-tools 0.2.0",
  "durs-conf 0.3.0-dev",
  "durs-module 0.3.0-dev",
  "durs-wot 0.8.0-a0.9",
  "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "id_tree 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustbreak 2.0.0-rc3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rkv 0.9.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
  "unwrap 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "durs-common-dal"
+version = "0.1.0-a"
+dependencies = [
+ "bincode 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "durs-common-tools 0.2.0",
+ "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rkv 0.9.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustbreak 2.0.0-rc3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unwrap 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "durs-common-tests-tools"
 version = "0.1.0"
@@ -960,14 +976,6 @@ dependencies = [
  "uuid 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "id_tree"
-version = "1.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "snowflake 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "idna"
 version = "0.2.0"
@@ -1053,6 +1061,27 @@ dependencies = [
  "vcpkg 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "lmdb-rkv"
+version = "0.11.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lmdb-rkv-sys 0.8.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "lmdb-rkv-sys"
+version = "0.8.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cc 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pkg-config 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "log"
 version = "0.4.8"
@@ -1215,6 +1244,14 @@ dependencies = [
  "vcpkg 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "ordered-float"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "os_type"
 version = "2.2.0"
@@ -1571,6 +1608,25 @@ dependencies = [
  "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "rkv"
+version = "0.9.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bincode 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lmdb-rkv 0.11.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ordered-float 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_derive 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)",
+ "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "rpassword"
 version = "1.0.2"
@@ -2231,7 +2287,6 @@ dependencies = [
 "checksum hmac 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695"
 "checksum httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9"
 "checksum human-panic 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "21638c5955a6daf3ecc42cae702335fc37a72a4abcc6959ce457b31a7d43bbdd"
-"checksum id_tree 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1bb51d6d2c77a59bfe64a6e909a00140e680fc9a32c6f383e64ed462b3cab957"
 "checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9"
 "checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08"
 "checksum itertools 0.7.11 (registry+https://github.com/rust-lang/crates.io-index)" = "0d47946d458e94a1b7bcabbf6521ea7c037062c81f534615abcad76e84d4970d"
@@ -2242,6 +2297,8 @@ dependencies = [
 "checksum lazycell 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f"
 "checksum libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)" = "34fcd2c08d2f832f376f4173a231990fa5aef4e99fb569867318a227ef4c06ba"
 "checksum libz-sys 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "2eb5e43362e38e2bca2fd5f5134c4d4564a23a5c28e9b95411652021a8675ebe"
+"checksum lmdb-rkv 0.11.4 (registry+https://github.com/rust-lang/crates.io-index)" = "e25b4069789bf7ac069d6fd58229f18aec20c6f7cc9173cb731d11c10dbb6b6e"
+"checksum lmdb-rkv-sys 0.8.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c60e2728ce41a4d4fa4ccf3d07c105bebf198721117e6328a3cf1cb7e4242c70"
 "checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7"
 "checksum maplit 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "08cbb6b4fef96b6d77bfc40ec491b1690c779e77b05cd9f07f787ed376fd4c43"
 "checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
@@ -2261,6 +2318,7 @@ dependencies = [
 "checksum opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c"
 "checksum openssl 0.10.24 (registry+https://github.com/rust-lang/crates.io-index)" = "8152bb5a9b5b721538462336e3bef9a539f892715e5037fda0f984577311af15"
 "checksum openssl-sys 0.9.49 (registry+https://github.com/rust-lang/crates.io-index)" = "f4fad9e54bd23bd4cbbe48fdc08a1b8091707ac869ef8508edea2fec77dcc884"
+"checksum ordered-float 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "18869315e81473c951eb56ad5558bbc56978562d3ecfb87abb7a1e944cea4518"
 "checksum os_type 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7edc011af0ae98b7f88cf7e4a83b70a54a75d2b8cb013d6efd02e5956207e9eb"
 "checksum pbkdf2 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9"
 "checksum pbr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "deb73390ab68d81992bd994d145f697451bb0b54fd39738e72eef32458ad6907"
@@ -2300,6 +2358,7 @@ dependencies = [
 "checksum regex-syntax 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b143cceb2ca5e56d5671988ef8b15615733e7ee16cd348e064333b251b89343f"
 "checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
 "checksum ring 0.16.7 (registry+https://github.com/rust-lang/crates.io-index)" = "796ae8317a07b04dffb1983bdc7045ccd02f741f0b411704f07fd35dbf99f757"
+"checksum rkv 0.9.7 (registry+https://github.com/rust-lang/crates.io-index)" = "4f9d6a4dd60be13a62ae1d19df68c0c85d77bbee3749b62bf35c49f207d3d750"
 "checksum rpassword 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b273c91bd242ca03ad6d71c143b6f17a48790e61f21a6c78568fa2b6774a24a4"
 "checksum rprompt 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1601f32bc5858aae3cbfa1c645c96c4d820cc5c16be0194f089560c00b6eb625"
 "checksum rust-argon2 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4ca4eaef519b494d1f2848fc602d18816fed808a981aedf4f1f00ceb7c9d32cf"
diff --git a/Cargo.toml b/Cargo.toml
index 6885a6222816422e8f3758b28363cfaebb2bd964..7237617f517264420d52869aac9575a4daa9c05a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -14,6 +14,7 @@ members = [
     "lib/dubp/user-docs",
     "lib/dubp/wot",
     "lib/dunp/network-documents",
+    "lib/modules-lib/common-dal",
     "lib/modules/blockchain/blockchain",
     "lib/modules/blockchain/blockchain-dal",
     "lib/modules/skeleton",
diff --git a/lib/modules-lib/common-dal/Cargo.toml b/lib/modules-lib/common-dal/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..1bb3afb5c63306137c70f7bebeaafaaab4c6f167
--- /dev/null
+++ b/lib/modules-lib/common-dal/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "durs-common-dal"
+version = "0.1.0-a"
+authors = ["librelois <elois@ifee.fr>"]
+description = "Common Data Access Layer for Dunitrust project."
+license = "AGPL-3.0"
+edition = "2018"
+
+[lib]
+path = "src/lib.rs"
+
+[dependencies]
+bincode = "1.0.*"
+durs-common-tools = { path = "../../tools/common-tools" }
+fnv = "1.0.6"
+log = "0.4.*"
+rkv = "0.9.7"
+rustbreak = {version = "2.0.0-rc3", features = ["bin_enc"]}
+serde = { version = "1.0.*", features = ["derive"] }
+serde_json = "1.0.*"
+unwrap = "1.2.1"
+
+[dev-dependencies]
+
+
+[features]
diff --git a/lib/modules-lib/common-dal/src/errors.rs b/lib/modules-lib/common-dal/src/errors.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9af309760462c2c71028d4f4314ef6079449adeb
--- /dev/null
+++ b/lib/modules-lib/common-dal/src/errors.rs
@@ -0,0 +1,76 @@
+//  Copyright (C) 2017-2019  The AXIOM TEAM Association.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Common Data Access Layer for Dunitrust project
+//! Error management
+
+use rustbreak::error::{RustbreakError, RustbreakErrorKind};
+
+#[derive(Debug)]
+/// Data Access Layer Error
+pub enum DALError {
+    /// Abort write transaction
+    WriteAbort {
+        /// Reason of transaction abort
+        reason: String,
+    },
+    /// Error in write operation
+    WriteError,
+    /// Error in read operation
+    ReadError,
+    /// A database is corrupted, you have to reset the data completely
+    DBCorrupted,
+    /// Error with the file system
+    FileSystemError,
+    /// Serialization/Deserialization error
+    SerdeError(String),
+    /// Rkv store error
+    StoreError(rkv::error::StoreError),
+    /// A panic was caught during a write operation
+    WritePanic,
+    /// Unknown error
+    UnknowError,
+}
+
+impl From<bincode::Error> for DALError {
+    fn from(e: bincode::Error) -> DALError {
+        DALError::SerdeError(format!("{}", e))
+    }
+}
+
+impl From<rkv::error::StoreError> for DALError {
+    fn from(e: rkv::error::StoreError) -> DALError {
+        DALError::StoreError(e)
+    }
+}
+
+impl<T> From<std::sync::PoisonError<T>> for DALError {
+    fn from(_: std::sync::PoisonError<T>) -> DALError {
+        DALError::DBCorrupted
+    }
+}
+
+impl From<RustbreakError> for DALError {
+    fn from(rust_break_error: RustbreakError) -> DALError {
+        match rust_break_error.kind() {
+            RustbreakErrorKind::Serialization => DALError::WriteError,
+            RustbreakErrorKind::Deserialization => DALError::ReadError,
+            RustbreakErrorKind::Poison => DALError::DBCorrupted,
+            RustbreakErrorKind::Backend => DALError::FileSystemError,
+            RustbreakErrorKind::WritePanic => DALError::WritePanic,
+            _ => DALError::UnknowError,
+        }
+    }
+}
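+
+// A minimal test sketch showing how the `From` impls above let `?` convert a
+// backend error into `DALError`. The failing bincode deserialization below is
+// purely illustrative.
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn bincode_error_maps_to_serde_error() {
+        // Deserializing a `u32` from an empty buffer must fail.
+        let bincode_err = bincode::deserialize::<u32>(&[]).expect_err("empty input must fail");
+        match DALError::from(bincode_err) {
+            DALError::SerdeError(_) => (),
+            other => panic!("unexpected DALError variant: {:?}", other),
+        }
+    }
+}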
diff --git a/lib/modules-lib/common-dal/src/free_struct_db.rs b/lib/modules-lib/common-dal/src/free_struct_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b42fd09c1a0634507e21a0b1f595ef909c48db8e
--- /dev/null
+++ b/lib/modules-lib/common-dal/src/free_struct_db.rs
@@ -0,0 +1,123 @@
+//  Copyright (C) 2017-2019  The AXIOM TEAM Association.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Define free structured databases
+
+use crate::errors::DALError;
+use rustbreak::backend::{FileBackend, MemoryBackend};
+use rustbreak::error::RustbreakError;
+use rustbreak::{deser::Bincode, Database, FileDatabase, MemoryDatabase};
+use serde::de::DeserializeOwned;
+use serde::Serialize;
+use std::default::Default;
+use std::fmt::Debug;
+use std::fs;
+use std::panic::UnwindSafe;
+use std::path::PathBuf;
+
+/// Open free structured rustbreak memory database
+pub fn open_free_struct_memory_db<
+    D: Serialize + DeserializeOwned + Debug + Default + Clone + Send,
+>() -> Result<MemoryDatabase<D, Bincode>, DALError> {
+    let backend = MemoryBackend::new();
+    let db = MemoryDatabase::<D, Bincode>::from_parts(D::default(), backend, Bincode);
+    Ok(db)
+}
+
+/// Open free structured rustbreak file database
+pub fn open_free_struct_file_db<
+    D: Serialize + DeserializeOwned + Debug + Default + Clone + Send,
+>(
+    dbs_folder_path: &PathBuf,
+    db_file_name: &str,
+) -> Result<FileDatabase<D, Bincode>, DALError> {
+    let mut db_path = dbs_folder_path.clone();
+    db_path.push(db_file_name);
+    let file_path = db_path.as_path();
+    if file_path.exists()
+        && fs::metadata(file_path)
+            .expect("fail to get file size")
+            .len()
+            > 0
+    {
+        let backend = FileBackend::open(db_path.as_path())?;
+        let db = FileDatabase::<D, Bincode>::from_parts(D::default(), backend, Bincode);
+        db.load()?;
+        Ok(db)
+    } else {
+        Ok(FileDatabase::<D, Bincode>::from_path(
+            db_path.as_path(),
+            D::default(),
+        )?)
+    }
+}
+
+#[derive(Debug)]
+/// Database
+pub enum BinFreeStructDb<D: Serialize + DeserializeOwned + Debug + Default + Clone + Send> {
+    /// File database
+    File(Database<D, FileBackend, Bincode>),
+    /// Memory database
+    Mem(Database<D, MemoryBackend, Bincode>),
+}
+
+impl<D: Serialize + DeserializeOwned + Debug + Default + Clone + Send> BinFreeStructDb<D> {
+    /// Flush the data structure to the backend
+    pub fn save(&self) -> Result<(), RustbreakError> {
+        match *self {
+            BinFreeStructDb::File(ref file_db) => file_db.save(),
+            BinFreeStructDb::Mem(ref mem_db) => mem_db.save(),
+        }
+    }
+    /// Read lock the database and get read access to the Data container
+    /// This gives you a read-only lock on the database. You can have as many readers in parallel as you wish.
+    pub fn read<T, R>(&self, task: T) -> Result<R, RustbreakError>
+    where
+        T: FnOnce(&D) -> R,
+    {
+        match *self {
+            BinFreeStructDb::File(ref file_db) => file_db.read(task),
+            BinFreeStructDb::Mem(ref mem_db) => mem_db.read(task),
+        }
+    }
+    /// Write lock the database and get write access to the Data container
+    /// This gives you an exclusive lock on the memory object. Attempts to open the database for writing will block while it is being written to.
+    pub fn write<T>(&self, task: T) -> Result<(), RustbreakError>
+    where
+        T: FnOnce(&mut D),
+    {
+        match *self {
+            BinFreeStructDb::File(ref file_db) => file_db.write(task),
+            BinFreeStructDb::Mem(ref mem_db) => mem_db.write(task),
+        }
+    }
+    /// Write lock the database and get write access to the Data container in a safe way (a clone of the internal data is made).
+    pub fn write_safe<T>(&self, task: T) -> Result<(), RustbreakError>
+    where
+        T: FnOnce(&mut D) + UnwindSafe,
+    {
+        match *self {
+            BinFreeStructDb::File(ref file_db) => file_db.write_safe(task),
+            BinFreeStructDb::Mem(ref mem_db) => mem_db.write_safe(task),
+        }
+    }
+    /// Load the Data from the backend
+    pub fn load(&self) -> Result<(), RustbreakError> {
+        match *self {
+            BinFreeStructDb::File(ref file_db) => file_db.load(),
+            BinFreeStructDb::Mem(ref mem_db) => mem_db.load(),
+        }
+    }
+}
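+
+// A minimal usage sketch of the in-memory variant; the `u64` payload type is
+// illustrative only and not part of the API.
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::errors::DALError;
+
+    #[test]
+    fn write_then_read_memory_db() -> Result<(), DALError> {
+        let db = BinFreeStructDb::Mem(open_free_struct_memory_db::<u64>()?);
+        // Mutate the in-memory data container...
+        db.write(|data| *data += 1)?;
+        // ...then read the value back.
+        assert_eq!(db.read(|data| *data)?, 1);
+        Ok(())
+    }
+}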
diff --git a/lib/modules-lib/common-dal/src/kv_db.rs b/lib/modules-lib/common-dal/src/kv_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d6c9f4c4bc9c4c140ade28376140da980d8842ab
--- /dev/null
+++ b/lib/modules-lib/common-dal/src/kv_db.rs
@@ -0,0 +1,349 @@
+//  Copyright (C) 2017-2019  The AXIOM TEAM Association.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Define Key-Value database
+
+mod bin_kiv_db;
+mod bin_kv_db;
+mod file;
+mod file_inner_trait;
+mod mem;
+
+pub use bin_kiv_db::FileKivDb;
+pub use bin_kv_db::FileKvDb;
+
+use crate::errors::DALError;
+use durs_common_tools::fatal_error;
+use file_inner_trait::FileKvDbInnerTrait;
+use log::error;
+use rkv::{EnvironmentFlags, IntegerStore, Manager, Rkv, SingleStore, StoreOptions, Value};
+use rustbreak::{backend::MemoryBackend, deser::Bincode, MemoryDatabase};
+use serde::de::DeserializeOwned;
+use serde::Serialize;
+use std::collections::HashMap;
+use std::fmt::Debug;
+use std::path::{Path, PathBuf};
+
+/// Key-value Database with integer key
+pub type BinKivDb<V> = KvDbStore<FileKivDb<V>>;
+
+/// Key-value Database
+pub type BinKvDb<K, V> = KvDbStore<FileKvDb<K, V>>;
+
+/// Key-value database reader
+pub struct KvDbReader<'a> {
+    reader: Option<&'a rkv::Reader<'a>>,
+}
+
+/// Key-value database writer
+pub struct KvDbWriter<'a> {
+    writer: Option<rkv::Writer<'a>>,
+}
+
+/// Trait for read-only key-value database handlers
+pub trait KvDbReadOnlyHandlerTrait: Sized {
+    // TODO fn read()
+    /// Try to clone database handler
+    fn try_clone(&self) -> Result<Self, DALError>;
+
+    /// Read data from the database within a read transaction
+    fn read<F, R>(&self, f: F) -> Result<R, DALError>
+    where
+        F: FnOnce(KvDbReader) -> Result<R, DALError>;
+}
+
+/// Trait for key-value database handlers
+pub trait KvDbHandlerTrait: Sized {
+    /// Associated read-only handler type
+    type ReadOnlyHandler: KvDbReadOnlyHandlerTrait;
+
+    /// Get read-only handler
+    fn get_ro_handler(&self) -> Result<Self::ReadOnlyHandler, DALError>;
+    /// Open Key-value database
+    fn open_db(path: Option<&PathBuf>, schema: KbDbSchema) -> Result<Self, DALError>;
+    /// Read data from the database within a read transaction
+    fn read<F, R>(&self, f: F) -> Result<R, DALError>
+    where
+        F: FnOnce(KvDbReader) -> Result<R, DALError>;
+    /// Write data to the database
+    /// /!\ The written data are visible to readers but not persisted to disk until save() is called.
+    fn write<F>(&self, f: F) -> Result<(), DALError>
+    where
+        F: FnOnce(KvDbWriter) -> Result<KvDbWriter, DALError>;
+    /// Try to clone database handler
+    fn try_clone(&self) -> Result<Self, DALError>;
+}
+
+/// Key-value database read-only handler
+pub struct KvDbReadOnlyHandler(KvDbHandler);
+
+impl KvDbReadOnlyHandlerTrait for KvDbReadOnlyHandler {
+    fn read<F, R>(&self, f: F) -> Result<R, DALError>
+    where
+        F: FnOnce(KvDbReader) -> Result<R, DALError>,
+    {
+        self.0.read(f)
+    }
+    fn try_clone(&self) -> Result<Self, DALError> {
+        Ok(KvDbReadOnlyHandler(self.0.try_clone()?))
+    }
+}
+
+/// Key-value database
+pub struct KvDbHandler {
+    db_handler: KvDbEnum,
+}
+
+/// Key-value database enum
+enum KvDbEnum {
+    /// Key-value file Database
+    File(file::KvFileDbHandler),
+    /// Key-value memory Database
+    Mem(mem::KvMemDbHandler),
+}
+
+impl KvDbHandlerTrait for KvDbHandler {
+    type ReadOnlyHandler = KvDbReadOnlyHandler;
+
+    fn get_ro_handler(&self) -> Result<Self::ReadOnlyHandler, DALError> {
+        Ok(KvDbReadOnlyHandler(self.try_clone()?))
+    }
+    fn open_db(path: Option<&PathBuf>, schema: KbDbSchema) -> Result<Self, DALError> {
+        let db_handler = if let Some(path) = path {
+            KvDbEnum::File(file::KvFileDbHandler::open_db(path, &schema)?)
+        } else {
+            KvDbEnum::Mem(mem::KvMemDbHandler::open_db(&schema)?)
+        };
+
+        Ok(KvDbHandler { db_handler })
+    }
+    fn read<F, R>(&self, f: F) -> Result<R, DALError>
+    where
+        F: FnOnce(KvDbReader) -> Result<R, DALError>,
+    {
+        match &self.db_handler {
+            KvDbEnum::File(fdb) => fdb.read(f),
+            KvDbEnum::Mem(mdb) => mdb.read(f),
+        }
+    }
+    fn write<F>(&self, f: F) -> Result<(), DALError>
+    where
+        F: FnOnce(KvDbWriter) -> Result<KvDbWriter, DALError>,
+    {
+        match &self.db_handler {
+            KvDbEnum::File(fdb) => fdb.write(f),
+            KvDbEnum::Mem(mdb) => mdb.write(f),
+        }
+    }
+    fn try_clone(&self) -> Result<Self, DALError> {
+        let db_clone = match &self.db_handler {
+            KvDbEnum::File(fdb) => KvDbEnum::File(fdb.try_clone()?),
+            KvDbEnum::Mem(mdb) => KvDbEnum::Mem(mdb.clone()),
+        };
+
+        Ok(KvDbHandler {
+            db_handler: db_clone,
+        })
+    }
+}
+
+/// Describe Key-Value database schema
+#[derive(Debug, Clone)]
+pub struct KbDbSchema {
+    stores: HashMap<String, KvDbStoreType>,
+}
+
+/// Key-value Database
+pub enum KvDbStore<S>
+where
+    S: FileKvDbTrait,
+{
+    /// File database
+    File(S),
+    /// Memory database
+    Mem(MemoryDatabase<HashMap<<S as FileKvDbInnerTrait>::K, S::V>, Bincode>),
+}
+
+/// Key-value store type (a store is like a "table" in an RDBMS)
+#[derive(Debug, Clone, Copy)]
+pub enum KvDbStoreType {
+    /// Single valued map
+    Single,
+    /// Single valued map with integer key
+    SingleIntKey,
+}
+
+impl<S> KvDbStore<S>
+where
+    S: FileKvDbTrait,
+{
+    /// Get one value
+    pub fn get(
+        &self,
+        reader: &KvDbReader,
+        k: <S as FileKvDbInnerTrait>::K,
+    ) -> Result<Option<S::V>, DALError> {
+        match self {
+            KvDbStore::File(file_kv_db) => file_kv_db.get(reader, k),
+            KvDbStore::Mem(mem_kv_db) => Ok(mem_kv_db.read(|datas| datas.get(&k).cloned())?),
+        }
+    }
+    /// Put one value
+    pub fn put(
+        &self,
+        writer: &mut KvDbWriter,
+        k: <S as FileKvDbInnerTrait>::K,
+        v: &S::V,
+    ) -> Result<(), DALError> {
+        match self {
+            KvDbStore::File(file_kv_db) => file_kv_db.put(writer, k, v),
+            KvDbStore::Mem(mem_kv_db) => Ok(mem_kv_db.write(|datas| {
+                datas.insert(k, v.clone());
+            })?),
+        }
+    }
+    /// Delete one value
+    pub fn delete(
+        &self,
+        writer: &mut KvDbWriter,
+        k: <S as FileKvDbInnerTrait>::K,
+    ) -> Result<(), DALError> {
+        match self {
+            KvDbStore::File(file_kv_db) => file_kv_db.delete(writer, k),
+            KvDbStore::Mem(mem_kv_db) => Ok(mem_kv_db.write(|datas| {
+                datas.remove(&k);
+            })?),
+        }
+    }
+    /// Open a Key-Value database
+    pub fn open(path: Option<&PathBuf>, collection_name: &str) -> Result<Self, DALError> {
+        if let Some(path) = path {
+            Ok(KvDbStore::File(S::open(path.as_path(), collection_name)?))
+        } else {
+            let backend = MemoryBackend::new();
+            Ok(KvDbStore::Mem(MemoryDatabase::<
+                HashMap<<S as FileKvDbInnerTrait>::K, S::V>,
+                Bincode,
+            >::from_parts(
+                HashMap::default(), backend, Bincode
+            )))
+        }
+    }
+    /// Read data from the database within a read transaction
+    pub fn read<F, R>(&self, f: F) -> Result<R, DALError>
+    where
+        F: FnOnce(KvDbReader) -> Result<R, DALError>,
+    {
+        match self {
+            KvDbStore::File(file_kv_db) => file_kv_db.read(f),
+            KvDbStore::Mem(_) => f(KvDbReader { reader: None }),
+        }
+    }
+    /// Persist DB data on disk
+    pub fn save(&self) -> Result<(), DALError> {
+        if let KvDbStore::File(file_kv_db) = self {
+            file_kv_db.save()
+        } else {
+            Ok(())
+        }
+    }
+    /// Write data to the database
+    /// /!\ The written data are visible to readers but not persisted to disk until save() is called.
+    pub fn write<F>(&self, f: F) -> Result<(), DALError>
+    where
+        F: FnOnce(KvDbWriter) -> Result<KvDbWriter, DALError>,
+    {
+        match self {
+            KvDbStore::File(file_kv_db) => file_kv_db.write(f),
+            KvDbStore::Mem(_) => {
+                f(KvDbWriter { writer: None })?;
+                Ok(())
+            }
+        }
+    }
+}
+
+/// Trait implemented by file-backed key-value databases
+pub trait FileKvDbTrait: FileKvDbInnerTrait {
+    /// Value
+    type V: 'static + Serialize + DeserializeOwned + Debug + Clone + Send;
+
+    /// Get one value
+    fn get(
+        &self,
+        reader: &KvDbReader,
+        k: <Self as FileKvDbInnerTrait>::K,
+    ) -> Result<Option<Self::V>, DALError>;
+    /// Put one value
+    fn put(
+        &self,
+        writer: &mut KvDbWriter,
+        k: <Self as FileKvDbInnerTrait>::K,
+        v: &Self::V,
+    ) -> Result<(), DALError> {
+        if let Some(ref mut writer) = writer.writer {
+            self.store_put(writer, k, &Value::Blob(&bincode::serialize(v)?[..]))?;
+            Ok(())
+        } else {
+            fatal_error!("Dev err: writer for file db must have a file writer");
+        }
+    }
+    /// Delete one value
+    fn delete(
+        &self,
+        writer: &mut KvDbWriter,
+        k: <Self as FileKvDbInnerTrait>::K,
+    ) -> Result<(), DALError> {
+        if let Some(ref mut writer) = writer.writer {
+            self.store_delete(writer, k)?;
+            Ok(())
+        } else {
+            fatal_error!("Dev err: writer for file db must have a file writer");
+        }
+    }
+    /// Open a Key-Value database
+    fn open(path: &Path, collection_name: &str) -> Result<Self, DALError> {
+        let mut manager = Manager::singleton().write()?;
+        let mut env = Rkv::environment_builder();
+        env.set_flags(EnvironmentFlags::NO_SYNC)
+            .set_max_dbs(64)
+            .set_map_size(std::u32::MAX as usize);
+        let arc = manager.get_or_create(path, |path| Rkv::from_env(path, env))?;
+        Self::store_open(arc, collection_name)
+    }
+    /// Read data from the database within a read transaction
+    fn read<F, D>(&self, f: F) -> Result<D, DALError>
+    where
+        F: FnOnce(KvDbReader) -> Result<D, DALError>,
+    {
+        Ok(f(KvDbReader {
+            reader: Some(&self.arc_clone().read()?.read()?),
+        })?)
+    }
+    /// Persist DB data on disk
+    fn save(&self) -> Result<(), DALError> {
+        Ok(self.arc_clone().read()?.sync(true)?)
+    }
+    /// Write data to the database
+    /// /!\ The written data are visible to readers but not persisted to disk until save() is called.
+    fn write<F>(&self, f: F) -> Result<(), DALError>
+    where
+        F: FnOnce(KvDbWriter) -> Result<KvDbWriter, DALError>,
+    {
+        if let Some(writer) = f(KvDbWriter {
+            writer: Some(self.arc().read()?.write()?),
+        })?
+        .writer
+        {
+            writer.commit()?;
+        }
+        Ok(())
+    }
+}
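+
+// A minimal usage sketch exercising the in-memory variant of `KvDbStore`; the
+// `Vec<u8>` key, `u64` value and store name are illustrative only.
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::errors::DALError;
+
+    #[test]
+    fn mem_kv_store_put_get_delete() -> Result<(), DALError> {
+        // `None` selects the memory backend instead of an LMDB file.
+        let db: BinKvDb<Vec<u8>, u64> = KvDbStore::open(None, "test_store")?;
+        db.write(|mut writer| {
+            db.put(&mut writer, b"answer".to_vec(), &42)?;
+            Ok(writer)
+        })?;
+        assert_eq!(db.read(|reader| db.get(&reader, b"answer".to_vec()))?, Some(42));
+        db.write(|mut writer| {
+            db.delete(&mut writer, b"answer".to_vec())?;
+            Ok(writer)
+        })?;
+        assert_eq!(db.read(|reader| db.get(&reader, b"answer".to_vec()))?, None);
+        Ok(())
+    }
+}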
diff --git a/lib/modules-lib/common-dal/src/kv_db/bin_kiv_db.rs b/lib/modules-lib/common-dal/src/kv_db/bin_kiv_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9f45bb6fb8875895d73c6b55ba140c9eeb0013d7
--- /dev/null
+++ b/lib/modules-lib/common-dal/src/kv_db/bin_kiv_db.rs
@@ -0,0 +1,94 @@
+//  Copyright (C) 2017-2019  The AXIOM TEAM Association.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Define Key-Value database with integer key
+
+use super::file_inner_trait::FileKvDbInnerTrait;
+use super::{FileKvDbTrait, KvDbReader};
+use crate::errors::DALError;
+use rkv::{IntegerStore, Rkv, StoreError, StoreOptions, Value};
+use serde::de::DeserializeOwned;
+use serde::Serialize;
+use std::fmt::Debug;
+use std::marker::PhantomData;
+use std::sync::{Arc, RwLock};
+
+/// Key-Value persisted DB with integer key
+pub struct FileKivDb<V>
+where
+    V: 'static + Serialize + DeserializeOwned + Debug + Clone + Send,
+{
+    arc: Arc<RwLock<Rkv>>,
+    store: IntegerStore<u32>,
+    //store_name: String,
+    phantom: PhantomData<V>,
+}
+
+impl<V> FileKvDbInnerTrait for FileKivDb<V>
+where
+    V: 'static + Serialize + DeserializeOwned + Debug + Clone + Send,
+{
+    type K = u32;
+
+    fn arc(&self) -> &Arc<RwLock<Rkv>> {
+        &self.arc
+    }
+    fn arc_clone(&self) -> Arc<RwLock<Rkv>> {
+        self.arc().clone()
+    }
+    fn store_open(arc: Arc<RwLock<Rkv>>, store_name: &str) -> Result<Self, DALError> {
+        let store = arc
+            .clone()
+            .read()?
+            .open_integer(store_name, StoreOptions::create())?;
+        Ok(FileKivDb {
+            arc,
+            store,
+            //store_name: collection_name.to_owned(),
+            phantom: PhantomData,
+        })
+    }
+    fn store_put(
+        &self,
+        writer: &mut rkv::Writer,
+        k: Self::K,
+        value: &Value,
+    ) -> Result<(), StoreError> {
+        self.store.put(writer, k, value)
+    }
+    fn store_delete(&self, writer: &mut rkv::Writer, k: Self::K) -> Result<(), StoreError> {
+        self.store.delete(writer, k)
+    }
+}
+
+impl<V> FileKvDbTrait for FileKivDb<V>
+where
+    V: 'static + Serialize + DeserializeOwned + Debug + Clone + Send,
+{
+    type V = V;
+
+    fn get(&self, reader: &KvDbReader, k: u32) -> Result<Option<V>, DALError> {
+        if let Some(Value::Blob(v)) = self.store.get(
+            reader
+                .reader
+                .expect("Dev err: reader for file db must have a file reader"),
+            k,
+        )? {
+            Ok(Some(bincode::deserialize(&v)?))
+        } else {
+            Ok(None)
+        }
+    }
+}
diff --git a/lib/modules-lib/common-dal/src/kv_db/bin_kv_db.rs b/lib/modules-lib/common-dal/src/kv_db/bin_kv_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5ecafb743f56e1eecdd4dc588eb2573b9f348d89
--- /dev/null
+++ b/lib/modules-lib/common-dal/src/kv_db/bin_kv_db.rs
@@ -0,0 +1,189 @@
+//  Copyright (C) 2017-2019  The AXIOM TEAM Association.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Define Key-Value database
+
+use super::file_inner_trait::FileKvDbInnerTrait;
+use super::{FileKvDbTrait, KvDbReader};
+use crate::errors::DALError;
+use rkv::{Rkv, SingleStore, StoreError, StoreOptions, Value};
+use serde::de::DeserializeOwned;
+use serde::Serialize;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::marker::PhantomData;
+use std::sync::{Arc, RwLock};
+
+/// Key-Value persisted DB
+pub struct FileKvDb<K, V>
+where
+    K: 'static + AsRef<[u8]> + Clone + Debug + DeserializeOwned + Eq + Hash + Send + Serialize,
+    V: 'static + Serialize + DeserializeOwned + Debug + Clone + Send,
+{
+    arc: Arc<RwLock<Rkv>>,
+    store: SingleStore,
+    //store_name: String,
+    phantom_key: PhantomData<K>,
+    phantom_value: PhantomData<V>,
+}
+
+impl<K, V> FileKvDbInnerTrait for FileKvDb<K, V>
+where
+    K: 'static + AsRef<[u8]> + Clone + Debug + DeserializeOwned + Eq + Hash + Send + Serialize,
+    V: 'static + Serialize + DeserializeOwned + Debug + Clone + Send,
+{
+    type K = K;
+
+    fn arc(&self) -> &Arc<RwLock<Rkv>> {
+        &self.arc
+    }
+    fn arc_clone(&self) -> Arc<RwLock<Rkv>> {
+        self.arc().clone()
+    }
+    fn store_open(arc: Arc<RwLock<Rkv>>, store_name: &str) -> Result<Self, DALError> {
+        let store = arc
+            .clone()
+            .read()?
+            .open_single(store_name, StoreOptions::create())?;
+        Ok(FileKvDb {
+            arc,
+            store,
+            //store_name: collection_name.to_owned(),
+            phantom_key: PhantomData,
+            phantom_value: PhantomData,
+        })
+    }
+    fn store_put(
+        &self,
+        writer: &mut rkv::Writer,
+        k: Self::K,
+        value: &Value,
+    ) -> Result<(), StoreError> {
+        self.store.put(writer, k, value)
+    }
+    fn store_delete(&self, writer: &mut rkv::Writer, k: Self::K) -> Result<(), StoreError> {
+        self.store.delete(writer, k)
+    }
+}
+
+impl<K, V> FileKvDbTrait for FileKvDb<K, V>
+where
+    K: 'static + AsRef<[u8]> + Clone + Debug + DeserializeOwned + Eq + Hash + Send + Serialize,
+    V: 'static + Serialize + DeserializeOwned + Debug + Clone + Send,
+{
+    type V = V;
+
+    fn get(&self, reader: &KvDbReader, k: K) -> Result<Option<V>, DALError> {
+        if let Some(Value::Blob(v)) = self.store.get(
+            reader
+                .reader
+                .expect("Dev err: reader for file db must have a file reader"),
+            k,
+        )? {
+            Ok(Some(bincode::deserialize(&v)?))
+        } else {
+            Ok(None)
+        }
+    }
+}
+
+/*
+impl<K, V> FileKvDb<K, V>
+where
+    K: 'static + AsRef<[u8]> + Serialize + DeserializeOwned + Debug + Clone + Send,
+    V: 'static + Serialize + DeserializeOwned + Debug + Clone + Send,
+{
+    /// Get one value
+    pub fn get(&self, reader: &KvDbReader, k: K) -> Result<Option<V>, DALError> {
+        if let Some(Value::Blob(v)) = self.store.get(
+            reader
+                .reader
+                .expect("Dev err: reader for file db must have a file reader"),
+            k,
+        )? {
+            Ok(Some(bincode::deserialize(&v)?))
+        } else {
+            Ok(None)
+        }
+    }
+    /// Put one value
+    pub fn put(&self, writer: &mut KvDbWriter, k: K, v: &V) -> Result<(), DALError> {
+        if let Some(ref mut writer) = writer.writer {
+            self.store
+                .put(writer, k, &Value::Blob(&bincode::serialize(v)?[..]))?;
+            Ok(())
+        } else {
+            fatal_error!("Dev err: writer for file db must have a file writer");
+        }
+    }
+    /// Delete one value
+    pub fn delete(&self, writer: &mut KvDbWriter, k: K) -> Result<(), DALError> {
+        if let Some(ref mut writer) = writer.writer {
+            self.store.delete(writer, k)?;
+            Ok(())
+        } else {
+            fatal_error!("Dev err: writer for file db must have a file writer");
+        }
+    }
+    /// Open a Key-Value database
+    pub fn open(path: &Path, collection_name: &str) -> Result<Self, DALError> {
+        let mut manager = Manager::singleton().write()?;
+        let mut env = Rkv::environment_builder();
+        env.set_flags(EnvironmentFlags::NO_SYNC)
+            .set_max_dbs(64)
+            .set_map_size(std::u32::MAX as usize);
+        let arc = manager.get_or_create(path, |path| Rkv::from_env(path, env))?;
+        let store = arc
+            .clone()
+            .read()?
+            .open_single(collection_name, StoreOptions::create())?;
+        Ok(FileKvDb {
+            arc,
+            store,
+            //store_name: collection_name.to_owned(),
+            phantom_key: PhantomData,
+            phantom_value: PhantomData,
+        })
+    }
+    /// Read datas in transaction database
+    pub fn read<F, D>(&self, f: F) -> Result<D, DALError>
+    where
+        F: FnOnce(KvDbReader) -> Result<D, DALError>,
+    {
+        Ok(f(KvDbReader {
+            reader: Some(&self.arc.clone().read()?.read()?),
+        })?)
+    }
+    /// Persist DB datas on disk
+    pub fn save(&self) -> Result<(), DALError> {
+        Ok(self.arc.clone().read()?.sync(true)?)
+    }
+    /// Write datas in database
+    /// /!\ The written data are visible to readers not persisted on the disk until a save() is performed.
+    pub fn write<F>(&self, f: F) -> Result<(), DALError>
+    where
+        F: FnOnce(KvDbWriter) -> Result<KvDbWriter, DALError>,
+    {
+        if let Some(writer) = f(KvDbWriter {
+            writer: Some(self.arc.read()?.write()?),
+        })?
+        .writer
+        {
+            writer.commit()?;
+        }
+        Ok(())
+    }
+}
+*/
diff --git a/lib/modules-lib/common-dal/src/kv_db/file.rs b/lib/modules-lib/common-dal/src/kv_db/file.rs
new file mode 100644
index 0000000000000000000000000000000000000000..fda54216ade8d5027564e4066ba444c21a90e3a1
--- /dev/null
+++ b/lib/modules-lib/common-dal/src/kv_db/file.rs
@@ -0,0 +1,144 @@
+//  Copyright (C) 2017-2019  The AXIOM TEAM Association.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Define Key-Value file database
+
+use super::{KbDbSchema, KvDbReader, KvDbStoreType, KvDbWriter};
+use crate::errors::DALError;
+//use durs_common_tools::fatal_error;
+//use log::error;
+use rkv::{
+    DatabaseFlags, EnvironmentFlags, IntegerStore, Manager, Rkv, SingleStore, StoreOptions, Value,
+};
+use serde::de::DeserializeOwned;
+use serde::Serialize;
+use std::collections::HashMap;
+use std::fmt::Debug;
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, RwLock};
+
+/// Key-value file Database read-only handler
+pub struct KvFileDbReadOnlyHandler(KvFileDbHandler);
+
+impl KvFileDbReadOnlyHandler {
+    /// Try to clone database handler
+    pub fn try_clone(&self) -> Result<KvFileDbHandler, DALError> {
+        self.0.try_clone()
+    }
+}
+
+/// Key-value file Database handler
+pub struct KvFileDbHandler {
+    arc: Arc<RwLock<Rkv>>,
+    path: PathBuf,
+    schema: KbDbSchema,
+    stores: HashMap<String, KvFileDbStore>,
+}
+
+/// Key-value file DB store (a store is like a "table" in an RDBMS)
+enum KvFileDbStore {
+    /// Single valued map
+    Single(SingleStore),
+    /// Single valued map with integer key
+    SingleIntKey(IntegerStore<u32>),
+}
+
+impl KvFileDbHandler {
+    fn arc(&self) -> &Arc<RwLock<Rkv>> {
+        &self.arc
+    }
+    fn arc_clone(&self) -> Arc<RwLock<Rkv>> {
+        self.arc().clone()
+    }
+    /// Get read-only handler
+    pub fn get_ro_handler(&self) -> Result<KvFileDbReadOnlyHandler, DALError> {
+        Ok(KvFileDbReadOnlyHandler(self.try_clone()?))
+    }
+    /// Open Key-value file Database
+    #[inline]
+    pub fn open_db(path: &PathBuf, schema: &KbDbSchema) -> Result<KvFileDbHandler, DALError> {
+        KvFileDbHandler::open_db_inner(path, schema, true)
+    }
+    fn open_db_inner(
+        path: &PathBuf,
+        schema: &KbDbSchema,
+        first_open: bool,
+    ) -> Result<KvFileDbHandler, DALError> {
+        let mut manager = Manager::singleton().write()?;
+        let mut env = Rkv::environment_builder();
+        env.set_flags(EnvironmentFlags::NO_SYNC)
+            .set_max_dbs(64)
+            .set_map_size(std::u32::MAX as usize);
+        let arc = manager.get_or_create(path.as_path(), |path| Rkv::from_env(path, env))?;
+
+        let mut stores = HashMap::new();
+        for (store_name, store_type) in &schema.stores {
+            let store = match store_type {
+                KvDbStoreType::Single => KvFileDbStore::Single(arc.clone().read()?.open_single(
+                    store_name.as_str(),
+                    StoreOptions {
+                        create: first_open,
+                        flags: DatabaseFlags::empty(),
+                    },
+                )?),
+                KvDbStoreType::SingleIntKey => {
+                    KvFileDbStore::SingleIntKey(arc.clone().read()?.open_integer(
+                        store_name.as_str(),
+                        StoreOptions {
+                            create: first_open,
+                            flags: DatabaseFlags::empty(),
+                        },
+                    )?)
+                }
+            };
+            stores.insert(store_name.to_owned(), store);
+        }
+
+        Ok(KvFileDbHandler {
+            arc,
+            path: path.clone(),
+            schema: schema.clone(),
+            stores,
+        })
+    }
+    /// Read values in database
+    pub fn read<F, R>(&self, f: F) -> Result<R, DALError>
+    where
+        F: FnOnce(KvDbReader) -> Result<R, DALError>,
+    {
+        Ok(f(KvDbReader {
+            reader: Some(&self.arc_clone().read()?.read()?),
+        })?)
+    }
+    /// Write data to the database
+    /// /!\ The written data are visible to readers but not persisted to disk until save() is called.
+    pub fn write<F>(&self, f: F) -> Result<(), DALError>
+    where
+        F: FnOnce(KvDbWriter) -> Result<KvDbWriter, DALError>,
+    {
+        if let Some(writer) = f(KvDbWriter {
+            writer: Some(self.arc().read()?.write()?),
+        })?
+        .writer
+        {
+            writer.commit()?;
+        }
+        Ok(())
+    }
+    /// Try to clone database handler
+    pub fn try_clone(&self) -> Result<KvFileDbHandler, DALError> {
+        KvFileDbHandler::open_db_inner(&self.path, &self.schema, false)
+    }
+}
diff --git a/lib/modules-lib/common-dal/src/kv_db/file_inner_trait.rs b/lib/modules-lib/common-dal/src/kv_db/file_inner_trait.rs
new file mode 100644
index 0000000000000000000000000000000000000000..393bb472510f53d70e2357e0da95fff8fc71fabc
--- /dev/null
+++ b/lib/modules-lib/common-dal/src/kv_db/file_inner_trait.rs
@@ -0,0 +1,40 @@
+//  Copyright (C) 2017-2019  The AXIOM TEAM Association.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Define the inner trait that must be implemented by all key-value databases.
+//! This inner trait is not exposed to other crates.
+
+use crate::errors::DALError;
+use rkv::{Rkv, StoreError, Value};
+use serde::de::DeserializeOwned;
+use serde::Serialize;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::sync::{Arc, RwLock};
+
+/// Inner trait implemented by every file-backed key-value database (crate-internal)
+pub trait FileKvDbInnerTrait: Sized {
+    /// Key type
+    type K: 'static + Clone + Debug + DeserializeOwned + Eq + Hash + Send + Serialize;
+
+    fn arc(&self) -> &Arc<RwLock<Rkv>>;
+    fn arc_clone(&self) -> Arc<RwLock<Rkv>>;
+    fn store_open(arc: Arc<RwLock<Rkv>>, store_name: &str) -> Result<Self, DALError>;
+    fn store_put(
+        &self,
+        writer: &mut rkv::Writer,
+        k: Self::K,
+        value: &Value,
+    ) -> Result<(), StoreError>;
+    fn store_delete(&self, writer: &mut rkv::Writer, k: Self::K) -> Result<(), StoreError>;
+}
diff --git a/lib/modules-lib/common-dal/src/kv_db/mem.rs b/lib/modules-lib/common-dal/src/kv_db/mem.rs
new file mode 100644
index 0000000000000000000000000000000000000000..7068d55527daa825ab6324adc2106d620b84db59
--- /dev/null
+++ b/lib/modules-lib/common-dal/src/kv_db/mem.rs
@@ -0,0 +1,106 @@
+//  Copyright (C) 2017-2019  The AXIOM TEAM Association.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Define Key-Value memory database
+
+use super::{KbDbSchema, KvDbReader, KvDbStoreType, KvDbWriter};
+use crate::errors::DALError;
+use rustbreak::{backend::MemoryBackend, deser::Bincode, MemoryDatabase};
+use std::collections::HashMap;
+use std::sync::Arc;
+
+/// Key-value memory database read-only handler
+#[derive(Clone)]
+pub struct KvMemDbReadOnlyHandler(KvMemDbHandler);
+
+/// Key-value memory database handler
+#[derive(Clone)]
+pub struct KvMemDbHandler {
+    stores: HashMap<String, KvMemDbStore>,
+}
+type KvMemDbSingleStore = MemoryDatabase<HashMap<Vec<u8>, Vec<u8>>, Bincode>;
+type KvMemDbIntegerStore = MemoryDatabase<HashMap<u32, Vec<u8>>, Bincode>;
+
+/// Key-value memory database data
+type KvMemDbDatas = HashMap<String, KvMemStoreDatas>;
+
+/// Key-value memory DB store data (a store is like a "table" in an RDBMS)
+enum KvMemStoreDatas {
+    /// Single valued map
+    Single(HashMap<Vec<u8>, Vec<u8>>),
+    /// Single valued map with integer key
+    SingleIntKey(HashMap<u32, Vec<u8>>),
+}
+
+/// Key-value memory DB store (a store is like a "table" in an RDBMS)
+#[derive(Clone)]
+enum KvMemDbStore {
+    /// Single valued map
+    Single(Arc<KvMemDbSingleStore>),
+    /// Single valued map with integer key
+    SingleIntKey(Arc<KvMemDbIntegerStore>),
+}
+
+impl KvMemDbHandler {
+    /// Get read-only handler
+    pub fn get_ro_handler(&self) -> Result<KvMemDbReadOnlyHandler, DALError> {
+        Ok(KvMemDbReadOnlyHandler(self.clone()))
+    }
+    /// Open Key-value memory Database
+    pub fn open_db(schema: &KbDbSchema) -> Result<KvMemDbHandler, DALError> {
+        let mut stores = HashMap::new();
+        for (store_name, store_type) in &schema.stores {
+            let backend = MemoryBackend::new();
+            let store = match store_type {
+                KvDbStoreType::Single => {
+                    KvMemDbStore::Single(Arc::new(MemoryDatabase::<
+                        HashMap<Vec<u8>, Vec<u8>>,
+                        Bincode,
+                    >::from_parts(
+                        HashMap::default(), backend, Bincode
+                    )))
+                }
+                KvDbStoreType::SingleIntKey => {
+                    KvMemDbStore::SingleIntKey(Arc::new(MemoryDatabase::<
+                        HashMap<u32, Vec<u8>>,
+                        Bincode,
+                    >::from_parts(
+                        HashMap::default(), backend, Bincode
+                    )))
+                }
+            };
+            stores.insert(store_name.to_owned(), store);
+        }
+
+        Ok(KvMemDbHandler { stores })
+    }
+    /// Read values in database
+    pub fn read<F, R>(&self, f: F) -> Result<R, DALError>
+    where
+        F: FnOnce(KvDbReader) -> Result<R, DALError>,
+    {
+        Ok(f(KvDbReader { reader: None })?)
+    }
+    /// Write data to the database
+    /// /!\ The written data are visible to readers but not persisted to disk until save() is called.
+    pub fn write<F>(&self, f: F) -> Result<(), DALError>
+    where
+        F: FnOnce(KvDbWriter) -> Result<KvDbWriter, DALError>,
+    {
+        f(KvDbWriter { writer: None })?;
+
+        Ok(())
+    }
+}
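+
+// A minimal sketch showing how a `KbDbSchema` might be described and used to
+// open an in-memory database; the store names are illustrative only.
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn open_mem_db_with_schema() -> Result<(), DALError> {
+        let mut stores = HashMap::new();
+        stores.insert("blocks".to_owned(), KvDbStoreType::SingleIntKey);
+        stores.insert("identities".to_owned(), KvDbStoreType::Single);
+        // One memory store is created per schema entry.
+        let handler = KvMemDbHandler::open_db(&KbDbSchema { stores })?;
+        assert_eq!(handler.stores.len(), 2);
+        Ok(())
+    }
+}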
diff --git a/lib/modules-lib/common-dal/src/lib.rs b/lib/modules-lib/common-dal/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..214b4388683469f93a33642292a91cdaf4bc5d11
--- /dev/null
+++ b/lib/modules-lib/common-dal/src/lib.rs
@@ -0,0 +1,57 @@
+//  Copyright (C) 2017-2019  The AXIOM TEAM Association.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Common Data Access Layer for Dunitrust project
+
+#![allow(dead_code, unused_imports, clippy::large_enum_variant)]
+#![deny(
+    missing_docs,
+    missing_copy_implementations,
+    trivial_casts,
+    trivial_numeric_casts,
+    unsafe_code,
+    unstable_features,
+    unused_import_braces,
+    unused_qualifications
+)]
+
+mod errors;
+mod free_struct_db;
+mod kv_db;
+
+pub use errors::DALError;
+pub use free_struct_db::{open_free_struct_file_db, open_free_struct_memory_db, BinFreeStructDb};
+pub use kv_db::{BinKivDb, BinKvDb};
+
+use serde::de::DeserializeOwned;
+use serde::Serialize;
+use std::default::Default;
+use std::fmt::Debug;
+use std::path::PathBuf;
+
+/// Open free structured database
+pub fn open_free_struct_db<D: Serialize + DeserializeOwned + Debug + Default + Clone + Send>(
+    dbs_folder_path: Option<&PathBuf>,
+    db_file_name: &str,
+) -> Result<BinFreeStructDb<D>, DALError> {
+    if let Some(dbs_folder_path) = dbs_folder_path {
+        Ok(BinFreeStructDb::File(open_free_struct_file_db::<D>(
+            dbs_folder_path,
+            db_file_name,
+        )?))
+    } else {
+        Ok(BinFreeStructDb::Mem(open_free_struct_memory_db::<D>()?))
+    }
+}
diff --git a/lib/modules/blockchain/blockchain-dal/Cargo.toml b/lib/modules/blockchain/blockchain-dal/Cargo.toml
index 13672ce3b531e32b001edf9a902f64e482964061..678b3ebd533cd92dca9f316ad6f63aa6d89572d0 100644
--- a/lib/modules/blockchain/blockchain-dal/Cargo.toml
+++ b/lib/modules/blockchain/blockchain-dal/Cargo.toml
@@ -10,6 +10,7 @@ edition = "2018"
 path = "src/lib.rs"
 
 [dependencies]
+bincode = "1.0.*"
 dubp-block-doc = { path = "../../../dubp/block-doc"} #, version = "0.1.0" }
 dubp-common-doc = { path = "../../../dubp/common-doc"} #, version = "0.1.0" }
 dubp-indexes = { path = "../../../dubp/indexes"} #, version = "0.1.0" }
@@ -18,12 +19,12 @@ dubp-currency-params = { path = "../../../dubp/currency-params" }
 dubp-user-docs= { path = "../../../dubp/user-docs" }
 durs-conf = { path = "../../../core/conf" }
 durs-module = { path = "../../../core/module" }
+durs-common-dal = { path = "../../../modules-lib/common-dal" }
 durs-common-tools = { path = "../../../tools/common-tools" }
 durs-wot = { path = "../../../dubp/wot" }
 fnv = "1.0.6"
-id_tree = "1.3.0"
 log = "0.4.*"
-rustbreak = {version = "2.0.0-rc3", features = ["bin_enc"]}
+rkv = "0.9.7"
 serde = "1.0.*"
 serde_derive = "1.0.*"
 serde_json = "1.0.*"
diff --git a/lib/modules/blockchain/blockchain-dal/src/constants.rs b/lib/modules/blockchain/blockchain-dal/src/constants.rs
index e44d0fd5fd2d43e335f27918a62015e2a18aac27..1ab2a08e033f6cd90b35f431ac648fdebbc1e25a 100644
--- a/lib/modules/blockchain/blockchain-dal/src/constants.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/constants.rs
@@ -18,3 +18,6 @@ pub static DEFAULT_PAGE_SIZE: &usize = &50;
 
 /// Currency parameters DB name
 pub const CURRENCY_PARAMS_DB_NAME: &str = "params.db";
+
+/// Local blockchain collection name
+pub const LOCAL_BLOCKCHAIN_COLLECTION_NAME: &str = "bc";
diff --git a/lib/modules/blockchain/blockchain-dal/src/entities/fork_tree.rs b/lib/modules/blockchain/blockchain-dal/src/entities/fork_tree.rs
index 7c27bfeab9310972c9205e0be963b194dbe79f85..5c3c941e6d9783518fc07248c84943e8bf7d095b 100644
--- a/lib/modules/blockchain/blockchain-dal/src/entities/fork_tree.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/entities/fork_tree.rs
@@ -112,6 +112,7 @@ impl TreeNode {
 #[derive(Debug, Clone, Serialize, Deserialize)]
 /// Tree store all forks branchs
 pub struct ForkTree {
+    current_blockstamp: Option<Blockstamp>,
     main_branch: HashMap<BlockNumber, TreeNodeId>,
     max_depth: usize,
     nodes: Vec<Option<TreeNode>>,
@@ -132,6 +133,7 @@ impl ForkTree {
     #[inline]
     pub fn new(max_depth: usize) -> Self {
         ForkTree {
+            current_blockstamp: None,
             main_branch: HashMap::with_capacity(max_depth + 1),
             max_depth,
             nodes: Vec::with_capacity(max_depth * 2),
@@ -140,6 +142,11 @@ impl ForkTree {
             sheets: HashSet::new(),
         }
     }
+    /// Get current blockstamp
+    #[inline]
+    pub fn get_current_blockstamp(&self) -> Option<Blockstamp> {
+        self.current_blockstamp
+    }
     /// Set max depth
     #[inline]
     pub fn set_max_depth(&mut self, max_depth: usize) {
@@ -329,6 +336,9 @@ impl ForkTree {
                 self.pruning();
             }
         }
+
+        // Update current blockstamp
+        self.current_blockstamp = Some(new_current_blockstamp);
     }
     /// Find node with specific blockstamp
     pub fn find_node_with_blockstamp(&self, blockstamp: &Blockstamp) -> Option<TreeNodeId> {
@@ -377,6 +387,7 @@ impl ForkTree {
         self.removed_blockstamps.clear();
         if main_branch {
             self.main_branch.insert(data.id, new_node_id);
+            self.current_blockstamp = Some(data);
             if self.main_branch.len() > self.max_depth {
                 self.pruning();
             }
diff --git a/lib/modules/blockchain/blockchain-dal/src/lib.rs b/lib/modules/blockchain/blockchain-dal/src/lib.rs
index 7b2d1158b81f81597768391f8a1378de0bd7f6c8..045ea88db858a394ab9012a4cc06e21bd38ae7c4 100644
--- a/lib/modules/blockchain/blockchain-dal/src/lib.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/lib.rs
@@ -18,7 +18,6 @@
 #![allow(clippy::large_enum_variant)]
 #![deny(
     missing_docs,
-    missing_debug_implementations,
     missing_copy_implementations,
     trivial_casts,
     trivial_numeric_casts,
@@ -45,39 +44,39 @@ pub mod filters;
 /// Contains all read databases functions
 pub mod readers;
 
+//pub mod storage;
+
 /// Tools
 pub mod tools;
 
 /// Contains all write databases functions
 pub mod writers;
 
+pub use durs_common_dal::{
+    open_free_struct_db, open_free_struct_file_db, open_free_struct_memory_db,
+};
+pub use durs_common_dal::{BinFreeStructDb, BinKivDb, DALError};
+
+use crate::constants::LOCAL_BLOCKCHAIN_COLLECTION_NAME;
+use crate::entities::block::DALBlock;
+use crate::entities::identity::DALIdentity;
+use crate::entities::sources::{SourceAmount, UTXOContentV10};
+use crate::writers::transaction::DALTxV10;
 use dubp_common_doc::{BlockNumber, Blockstamp, PreviousBlockstamp};
+use dubp_indexes::sindex::UniqueIdUTXOv10;
 use dubp_user_docs::documents::transaction::*;
 use dup_crypto::hashs::Hash;
 use dup_crypto::keys::*;
 use durs_common_tools::fatal_error;
 use durs_wot::data::{rusty::RustyWebOfTrust, NodeId};
 use fnv::FnvHashMap;
-use rustbreak::backend::{FileBackend, MemoryBackend};
-use rustbreak::error::{RustbreakError, RustbreakErrorKind};
-use rustbreak::{deser::Bincode, Database, FileDatabase, MemoryDatabase};
-use serde::de::DeserializeOwned;
+//use rkv::{IntegerStore, Value};
 use serde::Serialize;
 use std::collections::{HashMap, HashSet};
-use std::default::Default;
-use std::fmt::Debug;
-use std::fs;
-use std::panic::UnwindSafe;
 use std::path::PathBuf;
 
-use crate::entities::block::DALBlock;
-use crate::entities::identity::DALIdentity;
-use crate::entities::sources::{SourceAmount, UTXOContentV10};
-use crate::writers::transaction::DALTxV10;
-use dubp_indexes::sindex::UniqueIdUTXOv10;
-
-/// All blocks of local blockchain indexed by block number
-pub type LocalBlockchainV10Datas = FnvHashMap<BlockNumber, DALBlock>;
+/// All blocks of the local blockchain, stored in a key-value DB with an integer key (the block number)
+pub type LocalBcDbV10 = BinKivDb<DALBlock>;
 /// Forks tree meta datas (block number and hash only)
 pub type ForksTreeV10Datas = entities::fork_tree::ForkTree;
 /// Forks blocks referenced in tree indexed by their blockstamp
@@ -101,77 +100,18 @@ pub type UDsV10Datas = HashMap<PubKey, HashSet<BlockNumber>>;
 /// V10 Balances accounts
 pub type BalancesV10Datas = HashMap<UTXOConditionsGroup, (SourceAmount, HashSet<UniqueIdUTXOv10>)>;
 
-#[derive(Debug)]
-/// Database
-pub enum BinDB<D: Serialize + DeserializeOwned + Debug + Default + Clone + Send> {
-    /// File database
-    File(Database<D, FileBackend, Bincode>),
-    /// Memory database
-    Mem(Database<D, MemoryBackend, Bincode>),
-}
-
-impl<D: Serialize + DeserializeOwned + Debug + Default + Clone + Send> BinDB<D> {
-    /// Flush the data structure to the backend
-    pub fn save(&self) -> Result<(), RustbreakError> {
-        match *self {
-            BinDB::File(ref file_db) => file_db.save(),
-            BinDB::Mem(ref mem_db) => mem_db.save(),
-        }
-    }
-    /// Read lock the database and get write access to the Data container
-    /// This gives you a read-only lock on the database. You can have as many readers in parallel as you wish.
-    pub fn read<T, R>(&self, task: T) -> Result<R, RustbreakError>
-    where
-        T: FnOnce(&D) -> R,
-    {
-        match *self {
-            BinDB::File(ref file_db) => file_db.read(task),
-            BinDB::Mem(ref mem_db) => mem_db.read(task),
-        }
-    }
-    /// Write lock the database and get write access to the Data container
-    /// This gives you an exclusive lock on the memory object. Trying to open the database in writing will block if it is currently being written to.
-    pub fn write<T>(&self, task: T) -> Result<(), RustbreakError>
-    where
-        T: FnOnce(&mut D),
-    {
-        match *self {
-            BinDB::File(ref file_db) => file_db.write(task),
-            BinDB::Mem(ref mem_db) => mem_db.write(task),
-        }
-    }
-    /// Write lock the database and get write access to the Data container in a safe way (clone of the internal data is made).
-    pub fn write_safe<T>(&self, task: T) -> Result<(), RustbreakError>
-    where
-        T: FnOnce(&mut D) + UnwindSafe,
-    {
-        match *self {
-            BinDB::File(ref file_db) => file_db.write_safe(task),
-            BinDB::Mem(ref mem_db) => mem_db.write_safe(task),
-        }
-    }
-    /// Load the Data from the backend
-    pub fn load(&self) -> Result<(), RustbreakError> {
-        match *self {
-            BinDB::File(ref file_db) => file_db.load(),
-            BinDB::Mem(ref mem_db) => mem_db.load(),
-        }
-    }
-}
-
-#[derive(Debug)]
 /// Set of databases storing block information
 pub struct BlocksV10DBs {
     /// Local blockchain database
-    pub blockchain_db: BinDB<LocalBlockchainV10Datas>,
+    pub blockchain_db: LocalBcDbV10,
 }
 
 impl BlocksV10DBs {
     /// Open blocks databases from their respective files
     pub fn open(db_path: Option<&PathBuf>) -> BlocksV10DBs {
         BlocksV10DBs {
-            blockchain_db: open_db::<LocalBlockchainV10Datas>(db_path, "blockchain.db")
-                .expect("Fail to open LocalBlockchainV10DB"),
+            blockchain_db: LocalBcDbV10::open(db_path, LOCAL_BLOCKCHAIN_COLLECTION_NAME)
+                .expect("fail to open LocalBcDbV10"),
         }
     }
     /// Save blocks databases in their respective files
@@ -179,7 +119,7 @@ impl BlocksV10DBs {
         info!("BLOCKCHAIN-DAL: Save LocalBlockchainV10DB.");
         self.blockchain_db
             .save()
-            .expect("Fatal error : fail to save LocalBlockchainV10DB !");
+            .expect("Fatal error : fail to save LocalBcDbV10 !");
     }
 }
 
@@ -187,23 +127,26 @@ impl BlocksV10DBs {
 /// Set of databases storing forks informations
 pub struct ForksDBs {
     /// Fork tree (store only blockstamp)
-    pub fork_tree_db: BinDB<ForksTreeV10Datas>,
+    pub fork_tree_db: BinFreeStructDb<ForksTreeV10Datas>,
     /// Blocks in fork tree
-    pub fork_blocks_db: BinDB<ForksBlocksV10Datas>,
+    pub fork_blocks_db: BinFreeStructDb<ForksBlocksV10Datas>,
     /// Orphan blocks
-    pub orphan_blocks_db: BinDB<OrphanBlocksV10Datas>,
+    pub orphan_blocks_db: BinFreeStructDb<OrphanBlocksV10Datas>,
 }
 
 impl ForksDBs {
     /// Open fork databases from their respective files
     pub fn open(db_path: Option<&PathBuf>) -> ForksDBs {
         ForksDBs {
-            fork_tree_db: open_db::<ForksTreeV10Datas>(db_path, "fork_tree.db")
+            fork_tree_db: open_free_struct_db::<ForksTreeV10Datas>(db_path, "fork_tree.db")
                 .expect("Fail to open ForksTreeV10Datas"),
-            fork_blocks_db: open_db::<ForksBlocksV10Datas>(db_path, "fork_blocks.db")
+            fork_blocks_db: open_free_struct_db::<ForksBlocksV10Datas>(db_path, "fork_blocks.db")
                 .expect("Fail to open ForkForksBlocksV10DatassV10DB"),
-            orphan_blocks_db: open_db::<OrphanBlocksV10Datas>(db_path, "orphan_blocks.db")
-                .expect("Fail to open OrphanBlocksV10Datas"),
+            orphan_blocks_db: open_free_struct_db::<OrphanBlocksV10Datas>(
+                db_path,
+                "orphan_blocks.db",
+            )
+            .expect("Fail to open OrphanBlocksV10Datas"),
         }
     }
     /// Save fork databases in their respective files
@@ -225,24 +168,26 @@ impl ForksDBs {
 /// Set of databases storing web of trust information
 pub struct WotsV10DBs {
     /// Store wot graph
-    pub wot_db: BinDB<WotDB>,
+    pub wot_db: BinFreeStructDb<WotDB>,
     /// Store idrntities
-    pub identities_db: BinDB<IdentitiesV10Datas>,
+    pub identities_db: BinFreeStructDb<IdentitiesV10Datas>,
     /// Store memberships created_block_id (Use only to detect expirations)
-    pub ms_db: BinDB<MsExpirV10Datas>,
+    pub ms_db: BinFreeStructDb<MsExpirV10Datas>,
     /// Store certifications created_block_id (Use only to detect expirations)
-    pub certs_db: BinDB<CertsExpirV10Datas>,
+    pub certs_db: BinFreeStructDb<CertsExpirV10Datas>,
 }
 
 impl WotsV10DBs {
     /// Open wot databases from their respective files
     pub fn open(db_path: Option<&PathBuf>) -> WotsV10DBs {
         WotsV10DBs {
-            wot_db: open_db::<RustyWebOfTrust>(db_path, "wot.db").expect("Fail to open WotDB"),
-            identities_db: open_db::<IdentitiesV10Datas>(db_path, "identities.db")
+            wot_db: open_free_struct_db::<RustyWebOfTrust>(db_path, "wot.db")
+                .expect("Fail to open WotDB"),
+            identities_db: open_free_struct_db::<IdentitiesV10Datas>(db_path, "identities.db")
                 .expect("Fail to open IdentitiesV10DB"),
-            ms_db: open_db::<MsExpirV10Datas>(db_path, "ms.db").expect("Fail to open MsExpirV10DB"),
-            certs_db: open_db::<CertsExpirV10Datas>(db_path, "certs.db")
+            ms_db: open_free_struct_db::<MsExpirV10Datas>(db_path, "ms.db")
+                .expect("Fail to open MsExpirV10DB"),
+            certs_db: open_free_struct_db::<CertsExpirV10Datas>(db_path, "certs.db")
                 .expect("Fail to open CertsExpirV10DB"),
         }
     }
@@ -272,25 +217,26 @@ impl WotsV10DBs {
 /// Set of databases storing currency information
 pub struct CurrencyV10DBs {
     /// Store all UD sources
-    pub du_db: BinDB<UDsV10Datas>,
+    pub du_db: BinFreeStructDb<UDsV10Datas>,
     /// Store all Transactions
-    pub tx_db: BinDB<TxV10Datas>,
+    pub tx_db: BinFreeStructDb<TxV10Datas>,
     /// Store all UTXOs
-    pub utxos_db: BinDB<UTXOsV10Datas>,
+    pub utxos_db: BinFreeStructDb<UTXOsV10Datas>,
     /// Store balances of all address (and theirs UTXOs indexs)
-    pub balances_db: BinDB<BalancesV10Datas>,
+    pub balances_db: BinFreeStructDb<BalancesV10Datas>,
 }
 
 impl CurrencyV10DBs {
     /// Open currency databases from their respective files
     pub fn open(db_path: Option<&PathBuf>) -> CurrencyV10DBs {
         CurrencyV10DBs {
-            du_db: open_db::<UDsV10Datas>(db_path, "du.db").expect("Fail to open UDsV10DB"),
-            tx_db: open_db::<TxV10Datas>(db_path, "tx.db")
+            du_db: open_free_struct_db::<UDsV10Datas>(db_path, "du.db")
+                .expect("Fail to open UDsV10DB"),
+            tx_db: open_free_struct_db::<TxV10Datas>(db_path, "tx.db")
                 .unwrap_or_else(|_| fatal_error!("Fail to open TxV10DB")),
-            utxos_db: open_db::<UTXOsV10Datas>(db_path, "sources.db")
+            utxos_db: open_free_struct_db::<UTXOsV10Datas>(db_path, "sources.db")
                 .expect("Fail to open UTXOsV10DB"),
-            balances_db: open_db::<BalancesV10Datas>(db_path, "balances.db")
+            balances_db: open_free_struct_db::<BalancesV10Datas>(db_path, "balances.db")
                 .expect("Fail to open BalancesV10DB"),
         }
     }
@@ -316,36 +262,6 @@ impl CurrencyV10DBs {
     }
 }
 
-#[derive(Debug, Deserialize, Copy, Clone, PartialEq, Eq, Hash, Serialize)]
-/// Data Access Layer Error
-pub enum DALError {
-    /// Error in write operation
-    WriteError,
-    /// Error in read operation
-    ReadError,
-    /// A database is corrupted, you have to reset the data completely
-    DBCorrupted,
-    /// Error with the file system
-    FileSystemError,
-    /// Capturing a panic signal during a write operation
-    WritePanic,
-    /// Unknown error
-    UnknowError,
-}
-
-impl From<RustbreakError> for DALError {
-    fn from(rust_break_error: RustbreakError) -> DALError {
-        match rust_break_error.kind() {
-            RustbreakErrorKind::Serialization => DALError::WriteError,
-            RustbreakErrorKind::Deserialization => DALError::ReadError,
-            RustbreakErrorKind::Poison => DALError::DBCorrupted,
-            RustbreakErrorKind::Backend => DALError::FileSystemError,
-            RustbreakErrorKind::WritePanic => DALError::WritePanic,
-            _ => DALError::UnknowError,
-        }
-    }
-}
-
 /*#[derive(Debug, Clone)]
 pub struct WotStats {
     pub block_number: u32,
@@ -359,52 +275,3 @@ pub struct WotStats {
     pub average_centrality: usize,
     pub centralities: Vec<u64>,
 }*/
-
-/// Open Rustbreak database
-pub fn open_db<D: Serialize + DeserializeOwned + Debug + Default + Clone + Send>(
-    dbs_folder_path: Option<&PathBuf>,
-    db_file_name: &str,
-) -> Result<BinDB<D>, DALError> {
-    if let Some(dbs_folder_path) = dbs_folder_path {
-        Ok(BinDB::File(open_file_db::<D>(
-            dbs_folder_path,
-            db_file_name,
-        )?))
-    } else {
-        Ok(BinDB::Mem(open_memory_db::<D>()?))
-    }
-}
-
-/// Open Rustbreak memory database
-pub fn open_memory_db<D: Serialize + DeserializeOwned + Debug + Default + Clone + Send>(
-) -> Result<MemoryDatabase<D, Bincode>, DALError> {
-    let backend = MemoryBackend::new();
-    let db = MemoryDatabase::<D, Bincode>::from_parts(D::default(), backend, Bincode);
-    Ok(db)
-}
-
-/// Open Rustbreak file database
-pub fn open_file_db<D: Serialize + DeserializeOwned + Debug + Default + Clone + Send>(
-    dbs_folder_path: &PathBuf,
-    db_file_name: &str,
-) -> Result<FileDatabase<D, Bincode>, DALError> {
-    let mut db_path = dbs_folder_path.clone();
-    db_path.push(db_file_name);
-    let file_path = db_path.as_path();
-    if file_path.exists()
-        && fs::metadata(file_path)
-            .expect("fail to get file size")
-            .len()
-            > 0
-    {
-        let backend = FileBackend::open(db_path.as_path())?;
-        let db = FileDatabase::<D, Bincode>::from_parts(D::default(), backend, Bincode);
-        db.load()?;
-        Ok(db)
-    } else {
-        Ok(FileDatabase::<D, Bincode>::from_path(
-            db_path.as_path(),
-            D::default(),
-        )?)
-    }
-}
diff --git a/lib/modules/blockchain/blockchain-dal/src/readers/balance.rs b/lib/modules/blockchain/blockchain-dal/src/readers/balance.rs
index 2709ac8e7708743ecc5b8dd72932773e015f8281..df07d5d0dc3c9e2a8ffc8df96cee8cdea37d4b4b 100644
--- a/lib/modules/blockchain/blockchain-dal/src/readers/balance.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/readers/balance.rs
@@ -18,7 +18,7 @@ use crate::*;
 
 /// Get address balance
 pub fn get_address_balance(
-    balances_db: &BinDB<BalancesV10Datas>,
+    balances_db: &BinFreeStructDb<BalancesV10Datas>,
     address: &UTXOConditionsGroup,
 ) -> Result<Option<SourceAmount>, DALError> {
     Ok(balances_db.read(|db| {
diff --git a/lib/modules/blockchain/blockchain-dal/src/readers/block.rs b/lib/modules/blockchain/blockchain-dal/src/readers/block.rs
index 034b15e7736dc2cbe46d81156dc6dc9b067c62e6..eda80472ddcadd09634d25e69b30465d4552f2b5 100644
--- a/lib/modules/blockchain/blockchain-dal/src/readers/block.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/readers/block.rs
@@ -21,36 +21,22 @@ use dup_crypto::keys::*;
 use std::collections::HashMap;
 use unwrap::unwrap;
 
-/// get current blockstamp
-pub fn get_current_blockstamp(blocks_db: &BlocksV10DBs) -> Result<Option<Blockstamp>, DALError> {
-    Ok(blocks_db.blockchain_db.read(|db| {
-        let blockchain_len = db.len() as u32;
-        if blockchain_len == 0 {
-            None
-        } else if let Some(dal_block) = db.get(&BlockNumber(blockchain_len - 1)) {
-            Some(dal_block.blockstamp())
-        } else {
-            None
-        }
-    })?)
-}
-
 /// Get block hash
 pub fn get_block_hash(
-    db: &BinDB<LocalBlockchainV10Datas>,
+    bc_backend: &LocalBcDbV10,
     block_number: BlockNumber,
 ) -> Result<Option<BlockHash>, DALError> {
-    Ok(db.read(|db| {
-        if let Some(dal_block) = db.get(&block_number) {
-            dal_block.block.hash()
+    Ok(
+        if let Some(block) = get_block_in_local_blockchain(bc_backend, block_number)? {
+            block.hash()
         } else {
             None
-        }
-    })?)
+        },
+    )
 }
 /// Return true if the node already knows this block
 pub fn already_have_block(
-    blockchain_db: &BinDB<LocalBlockchainV10Datas>,
+    blockchain_db: &LocalBcDbV10,
     forks_dbs: &ForksDBs,
     blockstamp: Blockstamp,
     previous_hash: Option<Hash>,
@@ -84,14 +70,7 @@ pub fn already_have_block(
             }
         }
     } else {
-        return Ok(blockchain_db.read(|db| {
-            if let Some(dal_block) = db.get(&blockstamp.id) {
-                if dal_block.block.hash().unwrap_or_default() == blockstamp.hash {
-                    return true;
-                }
-            }
-            false
-        })?);
+        return Ok(get_block_in_local_blockchain(blockchain_db, blockstamp.id)?.is_some());
     }
 
     Ok(false)
@@ -99,76 +78,110 @@ pub fn already_have_block(
 
 /// Get block
 pub fn get_block(
-    blockchain_db: &BinDB<LocalBlockchainV10Datas>,
-    forks_blocks_db: Option<&BinDB<ForksBlocksV10Datas>>,
+    blockchain_db: &LocalBcDbV10,
+    forks_blocks_db: Option<&BinFreeStructDb<ForksBlocksV10Datas>>,
     blockstamp: &Blockstamp,
 ) -> Result<Option<DALBlock>, DALError> {
-    let dal_block = blockchain_db.read(|db| db.get(&blockstamp.id).cloned())?;
-    if dal_block.is_none() && forks_blocks_db.is_some() {
+    let opt_dal_block = get_dal_block_in_local_blockchain(blockchain_db, blockstamp.id)?;
+    if opt_dal_block.is_none() && forks_blocks_db.is_some() {
         Ok(forks_blocks_db
             .expect("safe unwrap")
             .read(|db| db.get(&blockstamp).cloned())?)
     } else {
-        Ok(dal_block)
+        Ok(opt_dal_block)
     }
 }
 
 /// Get block in local blockchain
 #[inline]
 pub fn get_block_in_local_blockchain(
-    db: &BinDB<LocalBlockchainV10Datas>,
-    block_id: BlockNumber,
+    db: &LocalBcDbV10,
+    block_number: BlockNumber,
 ) -> Result<Option<BlockDocument>, DALError> {
-    Ok(db.read(|db| {
-        if let Some(dal_block) = db.get(&block_id) {
-            Some(dal_block.block.clone())
-        } else {
-            None
+    Ok(get_dal_block_in_local_blockchain(db, block_number)?.map(|dal_block| dal_block.block))
+}
+
+/// Get DAL block in local blockchain
+pub fn get_dal_block_in_local_blockchain(
+    local_bc_db: &LocalBcDbV10,
+    block_number: BlockNumber,
+) -> Result<Option<DALBlock>, DALError> {
+    local_bc_db.read(|r| local_bc_db.get(&r, block_number.0))
+    /*Ok(match bc_backend {
+        LocalBcDbV10::File { db, store } => {
+            db.read(|reader| to_dal_block(store.get(reader, block_number.0)?))?
         }
-    })?)
+        LocalBcDbV10::Mem(bc) => bc.get(&block_number).cloned(),
+    })*/
 }
 
 /// Get several blocks in local blockchain
-#[inline]
 pub fn get_blocks_in_local_blockchain(
-    db: &BinDB<LocalBlockchainV10Datas>,
+    bc_db: &LocalBcDbV10,
     first_block_number: BlockNumber,
-    count: u32,
+    mut count: u32,
 ) -> Result<Vec<BlockDocument>, DALError> {
-    Ok(db.read(|db| {
+    bc_db.read(|r| {
         let mut blocks = Vec::with_capacity(count as usize);
         let mut current_block_number = first_block_number;
-        while let Some(dal_block) = db.get(&current_block_number) {
-            blocks.push(dal_block.block.clone());
-            current_block_number = BlockNumber(current_block_number.0 + 1);
+        while let Some(dal_block) = bc_db.get(&r, current_block_number.0)? {
+            blocks.push(dal_block.block);
+            count -= 1;
+            if count > 0 {
+                current_block_number = BlockNumber(current_block_number.0 + 1);
+            } else {
+                return Ok(blocks);
+            }
+        }
+        Ok(blocks)
+    })
+    /*match bc_backend {
+        LocalBcDbV10::File { db, store } => {
+            while let Some(dal_block) =
+                db.read(|reader| to_dal_block(store.get(reader, current_block_number.0)?))?
+            {
+                blocks.push(dal_block.block);
+                count -= 1;
+                if count > 0 {
+                    current_block_number = BlockNumber(current_block_number.0 + 1);
+                } else {
+                    return Ok(blocks);
+                }
+            }
+        }
+        LocalBcDbV10::Mem(bc) => {
+            while let Some(dal_block) = bc.get(&current_block_number) {
+                blocks.push(dal_block.block.clone());
+                current_block_number = BlockNumber(current_block_number.0 + 1);
+            }
         }
-        blocks
-    })?)
+    }*/
 }
 
 /// Get current frame of calculating members
 pub fn get_current_frame(
     current_block: &DALBlock,
-    db: &BinDB<LocalBlockchainV10Datas>,
+    db: &LocalBcDbV10,
 ) -> Result<HashMap<PubKey, usize>, DALError> {
     let frame_begin =
         current_block.block.number().0 - current_block.block.current_frame_size() as u32;
-    Ok(db.read(|db| {
-        let mut current_frame: HashMap<PubKey, usize> = HashMap::new();
-        for block_number in frame_begin..current_block.block.number().0 {
-            let issuer = db
-                .get(&BlockNumber(block_number))
-                .unwrap_or_else(|| fatal_error!("Fail to get block #{} !", block_number))
-                .block
-                .issuers()[0];
-            let issuer_count_blocks = if let Some(issuer_count_blocks) = current_frame.get(&issuer)
-            {
-                issuer_count_blocks + 1
-            } else {
-                1
-            };
-            current_frame.insert(issuer, issuer_count_blocks);
-        }
-        current_frame
-    })?)
+
+    let blocks = get_blocks_in_local_blockchain(
+        db,
+        BlockNumber(frame_begin),
+        current_block.block.current_frame_size() as u32,
+    )?;
+
+    let mut current_frame: HashMap<PubKey, usize> = HashMap::new();
+    for block in blocks {
+        let issuer = block.issuers()[0];
+        let issuer_count_blocks = if let Some(issuer_count_blocks) = current_frame.get(&issuer) {
+            issuer_count_blocks + 1
+        } else {
+            1
+        };
+        current_frame.insert(issuer, issuer_count_blocks);
+    }
+
+    Ok(current_frame)
 }
diff --git a/lib/modules/blockchain/blockchain-dal/src/readers/certs.rs b/lib/modules/blockchain/blockchain-dal/src/readers/certs.rs
index f745d953c1fd8983009bac60aaba0c52d1907e68..63ab8fec5ea61a2700e03f78190fb3cf2fccc80a 100644
--- a/lib/modules/blockchain/blockchain-dal/src/readers/certs.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/readers/certs.rs
@@ -13,14 +13,14 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program.  If not, see <https://www.gnu.org/licenses/>.
 
-use crate::{BinDB, CertsExpirV10Datas, DALError};
+use crate::{BinFreeStructDb, CertsExpirV10Datas, DALError};
 use dubp_common_doc::BlockNumber;
 use durs_wot::NodeId;
 use std::collections::HashMap;
 
 /// Find certifications that emitted in indicated blocks expiring
 pub fn find_expire_certs(
-    certs_db: &BinDB<CertsExpirV10Datas>,
+    certs_db: &BinFreeStructDb<CertsExpirV10Datas>,
     blocks_expiring: Vec<BlockNumber>,
 ) -> Result<HashMap<(NodeId, NodeId), BlockNumber>, DALError> {
     Ok(certs_db.read(|db| {
diff --git a/lib/modules/blockchain/blockchain-dal/src/readers/fork_tree.rs b/lib/modules/blockchain/blockchain-dal/src/readers/fork_tree.rs
index a61a9a2a25bdd6d5e95c66681135f524b3a50d69..37c448f6cc272819d30d08fe8b6334b0cb2e9793 100644
--- a/lib/modules/blockchain/blockchain-dal/src/readers/fork_tree.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/readers/fork_tree.rs
@@ -16,6 +16,13 @@
 use crate::*;
 use dubp_common_doc::Blockstamp;
 
+/// Get current blockstamp
+pub fn get_current_blockstamp(forks_dbs: &ForksDBs) -> Result<Option<Blockstamp>, DALError> {
+    Ok(forks_dbs
+        .fork_tree_db
+        .read(|fork_tree| fork_tree.get_current_blockstamp())?)
+}
+
 /// Get stackables blocks
 pub fn get_stackables_blocks(
     forks_dbs: &ForksDBs,
diff --git a/lib/modules/blockchain/blockchain-dal/src/readers/identity.rs b/lib/modules/blockchain/blockchain-dal/src/readers/identity.rs
index 0e931916e8276ba72dceccc95a21b2334689f930..352a5133c8bcc2b7e63e3c8def7c7f8645d7f039 100644
--- a/lib/modules/blockchain/blockchain-dal/src/readers/identity.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/readers/identity.rs
@@ -15,7 +15,7 @@
 
 use crate::entities::identity::DALIdentity;
 use crate::filters::identities::IdentitiesFilter;
-use crate::{BinDB, DALError, IdentitiesV10Datas};
+use crate::{BinFreeStructDb, DALError, IdentitiesV10Datas};
 use dubp_common_doc::traits::Document;
 use dubp_common_doc::BlockNumber;
 use dup_crypto::keys::*;
@@ -24,7 +24,7 @@ use std::collections::HashMap;
 
 /// Get identities in databases
 pub fn get_identities(
-    db: &BinDB<IdentitiesV10Datas>,
+    db: &BinFreeStructDb<IdentitiesV10Datas>,
     filters: IdentitiesFilter,
     current_block_id: BlockNumber,
 ) -> Result<Vec<DALIdentity>, DALError> {
@@ -62,7 +62,7 @@ pub fn get_identities(
 
 /// Get identity in databases
 pub fn get_identity(
-    db: &BinDB<IdentitiesV10Datas>,
+    db: &BinFreeStructDb<IdentitiesV10Datas>,
     pubkey: &PubKey,
 ) -> Result<Option<DALIdentity>, DALError> {
     Ok(db.read(|db| {
@@ -76,7 +76,7 @@ pub fn get_identity(
 
 /// Get uid from pubkey
 pub fn get_uid(
-    identities_db: &BinDB<IdentitiesV10Datas>,
+    identities_db: &BinFreeStructDb<IdentitiesV10Datas>,
     pubkey: PubKey,
 ) -> Result<Option<String>, DALError> {
     Ok(identities_db.read(|db| {
@@ -90,7 +90,7 @@ pub fn get_uid(
 
 /// Get pubkey from uid
 pub fn get_pubkey_from_uid(
-    identities_db: &BinDB<IdentitiesV10Datas>,
+    identities_db: &BinFreeStructDb<IdentitiesV10Datas>,
     uid: &str,
 ) -> Result<Option<PubKey>, DALError> {
     Ok(identities_db.read(|db| {
@@ -105,7 +105,7 @@ pub fn get_pubkey_from_uid(
 
 /// Get wot_id index
 pub fn get_wot_index(
-    identities_db: &BinDB<IdentitiesV10Datas>,
+    identities_db: &BinFreeStructDb<IdentitiesV10Datas>,
 ) -> Result<HashMap<PubKey, NodeId>, DALError> {
     Ok(identities_db.read(|db| {
         db.iter()
@@ -155,8 +155,9 @@ mod test {
         ];
 
         // Write mock identities in DB
-        let identities_db =
-            BinDB::Mem(open_memory_db::<IdentitiesV10Datas>().expect("Fail to create memory DB !"));
+        let identities_db = BinFreeStructDb::Mem(
+            open_free_struct_memory_db::<IdentitiesV10Datas>().expect("Fail to create memory DB !"),
+        );
         for idty in &mock_identities {
             identities_db.write(|db| {
                 db.insert(idty.idty_doc.issuers()[0], idty.clone());
diff --git a/lib/modules/blockchain/blockchain-dal/src/storage.rs b/lib/modules/blockchain/blockchain-dal/src/storage.rs
new file mode 100644
index 0000000000000000000000000000000000000000..041a2d0ebf07a0d583d0e9629041775a4d297b51
--- /dev/null
+++ b/lib/modules/blockchain/blockchain-dal/src/storage.rs
@@ -0,0 +1,82 @@
+//  Copyright (C) 2017-2019  The AXIOM TEAM Association.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+use crate::DALError;
+use rkv::{IntegerStore, Manager, Rkv, StoreOptions};
+use std::path::PathBuf;
+use std::sync::{Arc, RwLock};
+
+/// Storage type
+pub enum StorageType {
+    /// One value per key
+    Single,
+    /// One value per integer key
+    SingleInteger,
+    /// Several values per key
+    Multi,
+    /// Several values per integer key
+    MultiInteger,
+}
+
+/// Database backend (file-backed LMDB environment or in-memory)
+pub enum DB {
+    /// File-backed LMDB environment
+    File(Arc<RwLock<Rkv>>),
+    /// In-memory backend
+    Mem(),
+}
+
+impl DB {
+    /// Open database
+    pub fn open(db_path: Option<&PathBuf>) -> DB {
+        if let Some(db_path) = db_path {
+            let mut manager = Manager::singleton()
+                .write()
+                .expect("fail to get rkv manager !");
+            let db = manager
+                .get_or_create(db_path.as_path(), Rkv::new)
+                .expect("Fail to open LMDB blockchain database !");
+            DB::File(db)
+        } else {
+            DB::Mem()
+        }
+    }
+    /// Open integer storage (a storage is like a table or collection)
+    /// NB: the `?` below assumes that `DALError` can be built from `rkv::StoreError`.
+    pub fn open_integer_storage(&self, storage_name: &str) -> Result<IntegerStore<u32>, DALError> {
+        match self {
+            DB::File(rkv_arc) => {
+                let rkv = rkv_arc.read().expect("Fail to read lock Rkv");
+                Ok(rkv.open_integer(storage_name, StoreOptions::create())?)
+            }
+            DB::Mem() => unimplemented!("in-memory integer storage is not implemented yet"),
+        }
+    }
+}
+
+/*pub trait MapStorage<K, V> {
+
+    open(Option<&PathBuf>) -> Result<Self, DALError>;
+
+    get(&self, key: &K) -> Result<Option<V>, DALError>;
+    put(&self, key: K, value: V) -> Result<(), DALError>;
+    delete(&self, key: &K) -> Result<(), DALError>;
+
+    get_values(&self, keys: Vec<&K>) -> Result<Vec<(&K, Option<V>)>, DALError>;
+    put_values(&self, datas: Vec<(K, V)>) -> Result<Vec<()>, DALError>;
+    delete_values(&self, keys: Vec<&K>) -> Result<(), DALError>;
+
+    fn save(&self) -> Result<(), DALError>;
+}*/
diff --git a/lib/modules/blockchain/blockchain-dal/src/storage/local_blockchain.rs b/lib/modules/blockchain/blockchain-dal/src/storage/local_blockchain.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a5cf34f2ff2361f84328ba61fce24de576e5378b
--- /dev/null
+++ b/lib/modules/blockchain/blockchain-dal/src/storage/local_blockchain.rs
@@ -0,0 +1,42 @@
+//  Copyright (C) 2017-2019  The AXIOM TEAM Association.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+//! Local blockchain storage
+
+/* Draft implementation of the `MapStorage` trait (itself still a commented-out
+sketch in `storage.rs`); `LocalBlockchainStorage` is a placeholder type name.
+
+use super::MapStorage;
+use crate::entities::block::DALBlock;
+use dubp_common_doc::BlockNumber;
+
+impl MapStorage<BlockNumber, DALBlock> for LocalBlockchainStorage {
+    fn open(db_path: Option<&PathBuf>) -> Result<Self, DALError>;
+
+    fn get(&self, key: &BlockNumber) -> Result<Option<DALBlock>, DALError>;
+    fn put(&self, key: BlockNumber, value: DALBlock) -> Result<(), DALError>;
+    fn delete(&self, key: &BlockNumber) -> Result<(), DALError>;
+
+    fn get_values(&self, keys: Vec<&BlockNumber>) -> Result<Vec<(&BlockNumber, Option<DALBlock>)>, DALError>;
+    fn put_values(&self, datas: Vec<(BlockNumber, DALBlock)>) -> Result<Vec<()>, DALError>;
+    fn delete_values(&self, keys: Vec<&BlockNumber>) -> Result<(), DALError>;
+
+    fn save(&self) -> Result<(), DALError> {
+        if let Some(file_backend) = self.open_file_backend() {
+            file_backend.sync(true)?;
+        }
+        Ok(())
+    }
+}*/
\ No newline at end of file
diff --git a/lib/modules/blockchain/blockchain-dal/src/writers/block.rs b/lib/modules/blockchain/blockchain-dal/src/writers/block.rs
index 3e8f991d19cd36e6aae504d00317611b553cdc09..f0637b096ce33666dff237a331f5bf35ba8c9031 100644
--- a/lib/modules/blockchain/blockchain-dal/src/writers/block.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/writers/block.rs
@@ -14,44 +14,76 @@
 // along with this program.  If not, see <https://www.gnu.org/licenses/>.
 
 use crate::entities::block::DALBlock;
+use crate::DALError;
 use crate::*;
-use crate::{BinDB, DALError, LocalBlockchainV10Datas};
 use dubp_block_doc::block::BlockDocumentTrait;
 use dubp_common_doc::traits::Document;
 use unwrap::unwrap;
 
 /// Insert new head Block in databases
 pub fn insert_new_head_block(
-    blockchain_db: &BinDB<LocalBlockchainV10Datas>,
-    forks_dbs: &ForksDBs,
+    blockchain_db: &mut LocalBcDbV10,
+    forks_dbs: Option<&ForksDBs>,
     dal_block: DALBlock,
 ) -> Result<(), DALError> {
-    // Insert head block in blockchain
-    blockchain_db.write(|db| {
-        db.insert(dal_block.block.number(), dal_block.clone());
-    })?;
+    /*match blockchain_db {
+        LocalBcDbV10::File { db, store } => {
+            let bin_dal_block = bincode::serialize(&dal_block)?;
 
-    // Insert head block in fork tree
-    let removed_blockstamps = crate::writers::fork_tree::insert_new_head_block(
-        &forks_dbs.fork_tree_db,
-        dal_block.blockstamp(),
-    )?;
+            db.write(|writer| {
+                store.put(
+                    writer,
+                    *dal_block.block.number(),
+                    &Value::Blob(&bin_dal_block),
+                )?;
+                Ok(KvDbWriteEnd::Commit)
+            })?;
+        }
+        LocalBcDbV10::Mem(ref mut bc) => {
+            bc.insert(dal_block.block.number(), dal_block.clone());
+        }
+    }*/
 
-    // Insert head block in ForksBlocks
-    forks_dbs.fork_blocks_db.write(|db| {
-        db.insert(dal_block.blockstamp(), dal_block);
+    // Insert head block in blockchain
+    //blockchain_db.write(vec![(*dal_block.block.number(), &dal_block)], vec![])?;
+    blockchain_db.write(|mut w| {
+        blockchain_db.put(&mut w, *dal_block.block.number(), &dal_block)?;
+        Ok(w)
     })?;
 
-    // Remove too old blocks
-    forks_dbs.fork_blocks_db.write(|db| {
-        for blockstamp in removed_blockstamps {
-            db.remove(&blockstamp);
-        }
-    })?;
+    if let Some(forks_dbs) = forks_dbs {
+        // Insert head block in fork tree
+        let removed_blockstamps = crate::writers::fork_tree::insert_new_head_block(
+            &forks_dbs.fork_tree_db,
+            dal_block.blockstamp(),
+        )?;
+
+        // Insert head block in ForksBlocks
+        forks_dbs.fork_blocks_db.write(|db| {
+            db.insert(dal_block.blockstamp(), dal_block);
+        })?;
 
+        // Remove too old blocks
+        forks_dbs.fork_blocks_db.write(|db| {
+            for blockstamp in removed_blockstamps {
+                db.remove(&blockstamp);
+            }
+        })?;
+    }
     Ok(())
 }
 
+/// Remove a block from the local blockchain storage
+pub fn remove_block(
+    blockchain_db: &mut LocalBcDbV10,
+    block_number: BlockNumber,
+) -> Result<(), DALError> {
+    blockchain_db.write(|mut w| {
+        blockchain_db.delete(&mut w, block_number.0)?;
+        Ok(w)
+    })
+}
+
 /// Insert new fork Block in databases
 pub fn insert_new_fork_block(forks_dbs: &ForksDBs, dal_block: DALBlock) -> Result<bool, DALError> {
     if crate::writers::fork_tree::insert_new_fork_block(
diff --git a/lib/modules/blockchain/blockchain-dal/src/writers/certification.rs b/lib/modules/blockchain/blockchain-dal/src/writers/certification.rs
index 63cd60e48d6caa8600c82747d88fbcecc8575e10..2918629de6bf24210718b10f3b7f296572eb3b15 100644
--- a/lib/modules/blockchain/blockchain-dal/src/writers/certification.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/writers/certification.rs
@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program.  If not, see <https://www.gnu.org/licenses/>.
 
-use crate::{BinDB, CertsExpirV10Datas, DALError, IdentitiesV10Datas};
+use crate::{BinFreeStructDb, CertsExpirV10Datas, DALError, IdentitiesV10Datas};
 use dubp_common_doc::BlockNumber;
 use dubp_currency_params::CurrencyParameters;
 use dubp_user_docs::documents::certification::CompactCertificationDocumentV10;
@@ -23,8 +23,8 @@ use durs_wot::NodeId;
 /// Apply "certification" event in databases
 pub fn write_certification(
     currency_params: &CurrencyParameters,
-    identities_db: &BinDB<IdentitiesV10Datas>,
-    certs_db: &BinDB<CertsExpirV10Datas>,
+    identities_db: &BinFreeStructDb<IdentitiesV10Datas>,
+    certs_db: &BinFreeStructDb<CertsExpirV10Datas>,
     source_pubkey: PubKey,
     source: NodeId,
     target: NodeId,
@@ -56,8 +56,8 @@ pub fn write_certification(
 
 /// Revert writtent certification
 pub fn revert_write_cert(
-    identities_db: &BinDB<IdentitiesV10Datas>,
-    certs_db: &BinDB<CertsExpirV10Datas>,
+    identities_db: &BinFreeStructDb<IdentitiesV10Datas>,
+    certs_db: &BinFreeStructDb<CertsExpirV10Datas>,
     compact_doc: CompactCertificationDocumentV10,
     source: NodeId,
     target: NodeId,
@@ -83,7 +83,7 @@ pub fn revert_write_cert(
 
 /// Revert "certification expiry" event in databases
 pub fn revert_expire_cert(
-    certs_db: &BinDB<CertsExpirV10Datas>,
+    certs_db: &BinFreeStructDb<CertsExpirV10Datas>,
     source: NodeId,
     target: NodeId,
     created_block_id: BlockNumber,
@@ -99,7 +99,7 @@ pub fn revert_expire_cert(
 
 /// Apply "certification expiry" event in databases
 pub fn expire_certs(
-    certs_db: &BinDB<CertsExpirV10Datas>,
+    certs_db: &BinFreeStructDb<CertsExpirV10Datas>,
     created_block_id: BlockNumber,
 ) -> Result<(), DALError> {
     // Remove CertsExpirV10Datas entries
diff --git a/lib/modules/blockchain/blockchain-dal/src/writers/dividend.rs b/lib/modules/blockchain/blockchain-dal/src/writers/dividend.rs
index 601e13f193a7de7c29a7d0b31e2fe69df4fc8abb..25af8596ed74846898b981c945efd08db60f27ff 100644
--- a/lib/modules/blockchain/blockchain-dal/src/writers/dividend.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/writers/dividend.rs
@@ -22,8 +22,8 @@ use std::collections::{HashMap, HashSet};
 
 /// Apply UD creation in databases
 pub fn create_du(
-    du_db: &BinDB<UDsV10Datas>,
-    balances_db: &BinDB<BalancesV10Datas>,
+    du_db: &BinFreeStructDb<UDsV10Datas>,
+    balances_db: &BinFreeStructDb<BalancesV10Datas>,
     du_amount: &SourceAmount,
     du_block_id: BlockNumber,
     members: &[PubKey],
diff --git a/lib/modules/blockchain/blockchain-dal/src/writers/fork_tree.rs b/lib/modules/blockchain/blockchain-dal/src/writers/fork_tree.rs
index d95cf6c005de9381daeb82dbd29ba6b871e981c9..c1ed3f586eb588d029b4b15252618e285c9f5f5e 100644
--- a/lib/modules/blockchain/blockchain-dal/src/writers/fork_tree.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/writers/fork_tree.rs
@@ -20,7 +20,7 @@ use dubp_common_doc::BlockHash;
 /// Insert new head Block in fork tree,
 /// return vector of removed blockstamps
 pub fn insert_new_head_block(
-    fork_tree_db: &BinDB<ForksTreeV10Datas>,
+    fork_tree_db: &BinFreeStructDb<ForksTreeV10Datas>,
     blockstamp: Blockstamp,
 ) -> Result<Vec<Blockstamp>, DALError> {
     fork_tree_db.write(|fork_tree| {
@@ -39,7 +39,7 @@ pub fn insert_new_head_block(
 /// Insert new fork block in fork tree only if parent exist in fork tree (orphan block not inserted)
 /// Returns true if block has a parent and has therefore been inserted, return false if block is orphaned
 pub fn insert_new_fork_block(
-    fork_tree_db: &BinDB<ForksTreeV10Datas>,
+    fork_tree_db: &BinFreeStructDb<ForksTreeV10Datas>,
     blockstamp: Blockstamp,
     previous_hash: Hash,
 ) -> Result<bool, DALError> {
@@ -97,12 +97,12 @@ mod test {
         // Create mock datas
         let blockstamps =
             dubp_user_docs_tests_tools::mocks::generate_blockstamps(*DEFAULT_FORK_WINDOW_SIZE + 2);
-        let fork_tree_db = open_db::<ForksTreeV10Datas>(None, "")?;
+        let fork_tree_db = open_free_struct_db::<ForksTreeV10Datas>(None, "")?;
 
         // Insert genesis block
         assert_eq!(
-            Ok(vec![]),
-            insert_new_head_block(&fork_tree_db, blockstamps[0])
+            Vec::<Blockstamp>::with_capacity(0),
+            insert_new_head_block(&fork_tree_db, blockstamps[0])?
         );
 
         // Check tree state
@@ -115,8 +115,8 @@ mod test {
         // Insert FORK_WINDOW_SIZE blocks
         for i in 1..*DEFAULT_FORK_WINDOW_SIZE {
             assert_eq!(
-                Ok(vec![]),
-                insert_new_head_block(&fork_tree_db, blockstamps[i])
+                Vec::<Blockstamp>::with_capacity(0),
+                insert_new_head_block(&fork_tree_db, blockstamps[i])?
             );
         }
 
@@ -135,12 +135,12 @@ mod test {
 
         // Insert blocks after FORK_WINDOW_SIZE (firsts blocks must be removed)
         assert_eq!(
-            Ok(vec![blockstamps[0]]),
-            insert_new_head_block(&fork_tree_db, blockstamps[*DEFAULT_FORK_WINDOW_SIZE])
+            vec![blockstamps[0]],
+            insert_new_head_block(&fork_tree_db, blockstamps[*DEFAULT_FORK_WINDOW_SIZE])?
         );
         assert_eq!(
-            Ok(vec![blockstamps[1]]),
-            insert_new_head_block(&fork_tree_db, blockstamps[*DEFAULT_FORK_WINDOW_SIZE + 1])
+            vec![blockstamps[1]],
+            insert_new_head_block(&fork_tree_db, blockstamps[*DEFAULT_FORK_WINDOW_SIZE + 1])?
         );
 
         Ok(())
@@ -151,13 +151,13 @@ mod test {
         // Create mock datas
         let blockstamps =
             dubp_user_docs_tests_tools::mocks::generate_blockstamps(*DEFAULT_FORK_WINDOW_SIZE + 3);
-        let fork_tree_db = open_db::<ForksTreeV10Datas>(None, "")?;
+        let fork_tree_db = open_free_struct_db::<ForksTreeV10Datas>(None, "")?;
 
         // Insert 4 main blocks
         for i in 0..4 {
             assert_eq!(
-                Ok(vec![]),
-                insert_new_head_block(&fork_tree_db, blockstamps[i])
+                Vec::<Blockstamp>::with_capacity(0),
+                insert_new_head_block(&fork_tree_db, blockstamps[i])?
             );
         }
 
@@ -174,8 +174,8 @@ mod test {
             hash: BlockHash(dup_crypto_tests_tools::mocks::hash('A')),
         };
         assert_eq!(
-            Ok(true),
-            insert_new_fork_block(&fork_tree_db, fork_blockstamp, blockstamps[2].hash.0)
+            true,
+            insert_new_fork_block(&fork_tree_db, fork_blockstamp, blockstamps[2].hash.0)?
         );
 
         // Check tree state
@@ -194,8 +194,8 @@ mod test {
             hash: BlockHash(dup_crypto_tests_tools::mocks::hash('B')),
         };
         assert_eq!(
-            Ok(true),
-            insert_new_fork_block(&fork_tree_db, fork_blockstamp_2, fork_blockstamp.hash.0)
+            true,
+            insert_new_fork_block(&fork_tree_db, fork_blockstamp_2, fork_blockstamp.hash.0)?
         );
 
         // Check tree state
@@ -211,8 +211,8 @@ mod test {
         // Insert FORK_WINDOW_SIZE blocks
         for i in 4..*DEFAULT_FORK_WINDOW_SIZE {
             assert_eq!(
-                Ok(vec![]),
-                insert_new_head_block(&fork_tree_db, blockstamps[i])
+                Vec::<Blockstamp>::with_capacity(0),
+                insert_new_head_block(&fork_tree_db, blockstamps[i])?
             );
         }
 
@@ -235,15 +235,15 @@ mod test {
         // Insert 2 new main blocks (too old blocks must be removed)
         for i in 0..2 {
             assert_eq!(
-                Ok(vec![blockstamps[i]]),
-                insert_new_head_block(&fork_tree_db, blockstamps[*DEFAULT_FORK_WINDOW_SIZE + i])
+                vec![blockstamps[i]],
+                insert_new_head_block(&fork_tree_db, blockstamps[*DEFAULT_FORK_WINDOW_SIZE + i])?
             );
         }
 
         // Insert one new main block (fork branch must be removed)
         assert_eq!(
-            Ok(vec![blockstamps[2], fork_blockstamp_2, fork_blockstamp]),
-            insert_new_head_block(&fork_tree_db, blockstamps[*DEFAULT_FORK_WINDOW_SIZE + 2])
+            vec![blockstamps[2], fork_blockstamp_2, fork_blockstamp],
+            insert_new_head_block(&fork_tree_db, blockstamps[*DEFAULT_FORK_WINDOW_SIZE + 2])?
         );
 
         // Check tree state
diff --git a/lib/modules/blockchain/blockchain-dal/src/writers/identity.rs b/lib/modules/blockchain/blockchain-dal/src/writers/identity.rs
index b9ed989947e9f373dd04b0163ef101bb296a186d..141b820fae2c698d488b457826d3dc5ee836f40e 100644
--- a/lib/modules/blockchain/blockchain-dal/src/writers/identity.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/writers/identity.rs
@@ -14,7 +14,7 @@
 // along with this program.  If not, see <https://www.gnu.org/licenses/>.
 
 use crate::entities::identity::{DALIdentity, DALIdentityState};
-use crate::{BinDB, DALError, IdentitiesV10Datas, MsExpirV10Datas};
+use crate::{BinFreeStructDb, DALError, IdentitiesV10Datas, MsExpirV10Datas};
 use dubp_common_doc::traits::Document;
 use dubp_common_doc::{BlockNumber, Blockstamp};
 use dubp_currency_params::CurrencyParameters;
@@ -25,8 +25,8 @@ use durs_wot::NodeId;
 
 /// Remove identity from databases
 pub fn revert_create_identity(
-    identities_db: &BinDB<IdentitiesV10Datas>,
-    ms_db: &BinDB<MsExpirV10Datas>,
+    identities_db: &BinFreeStructDb<IdentitiesV10Datas>,
+    ms_db: &BinFreeStructDb<MsExpirV10Datas>,
     pubkey: &PubKey,
 ) -> Result<(), DALError> {
     let dal_idty = identities_db.read(|db| {
@@ -53,8 +53,8 @@ pub fn revert_create_identity(
 /// Write identity in databases
 pub fn create_identity(
     currency_params: &CurrencyParameters,
-    identities_db: &BinDB<IdentitiesV10Datas>,
-    ms_db: &BinDB<MsExpirV10Datas>,
+    identities_db: &BinFreeStructDb<IdentitiesV10Datas>,
+    ms_db: &BinFreeStructDb<MsExpirV10Datas>,
     idty_doc: &IdentityDocumentV10,
     ms_created_block_id: BlockNumber,
     wot_id: NodeId,
@@ -90,7 +90,7 @@ pub fn create_identity(
 
 /// Apply "exclude identity" event
 pub fn exclude_identity(
-    identities_db: &BinDB<IdentitiesV10Datas>,
+    identities_db: &BinFreeStructDb<IdentitiesV10Datas>,
     pubkey: &PubKey,
     exclusion_blockstamp: &Blockstamp,
     revert: bool,
@@ -127,7 +127,7 @@ pub fn exclude_identity(
 
 /// Apply "revoke identity" event
 pub fn revoke_identity(
-    identities_db: &BinDB<IdentitiesV10Datas>,
+    identities_db: &BinFreeStructDb<IdentitiesV10Datas>,
     pubkey: &PubKey,
     renewal_blockstamp: &Blockstamp,
     explicit: bool,
@@ -178,8 +178,8 @@ pub fn revoke_identity(
 /// Apply "renewal identity" event in databases
 pub fn renewal_identity(
     currency_params: &CurrencyParameters,
-    identities_db: &BinDB<IdentitiesV10Datas>,
-    ms_db: &BinDB<MsExpirV10Datas>,
+    identities_db: &BinFreeStructDb<IdentitiesV10Datas>,
+    ms_db: &BinFreeStructDb<MsExpirV10Datas>,
     pubkey: &PubKey,
     idty_wot_id: NodeId,
     renewal_timestamp: u64,
@@ -241,7 +241,10 @@ pub fn renewal_identity(
 }
 
 /// Remove identity from databases
-pub fn remove_identity(db: &BinDB<IdentitiesV10Datas>, pubkey: PubKey) -> Result<(), DALError> {
+pub fn remove_identity(
+    db: &BinFreeStructDb<IdentitiesV10Datas>,
+    pubkey: PubKey,
+) -> Result<(), DALError> {
     db.write(|db| {
         db.remove(&pubkey);
     })?;
diff --git a/lib/modules/blockchain/blockchain-dal/src/writers/requests.rs b/lib/modules/blockchain/blockchain-dal/src/writers/requests.rs
index 7ece8baf5e3e16e099cd1d362bd0cdcfac57c53c..17e4fd40ec2322b45b5cd554dfabee20c4109060 100644
--- a/lib/modules/blockchain/blockchain-dal/src/writers/requests.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/writers/requests.rs
@@ -57,7 +57,7 @@ impl BlocksDBsWriteQuery {
     /// BlocksDBsWriteQuery
     pub fn apply(
         self,
-        blockchain_db: &BinDB<LocalBlockchainV10Datas>,
+        blockchain_db: &mut LocalBcDbV10,
         forks_db: &ForksDBs,
         fork_window_size: usize,
         sync_target: Option<Blockstamp>,
@@ -70,20 +70,14 @@ impl BlocksDBsWriteQuery {
                     || dal_block.blockstamp().id.0 + fork_window_size as u32
                         >= sync_target.expect("safe unwrap").id.0
                 {
-                    super::block::insert_new_head_block(blockchain_db, forks_db, dal_block)?;
+                    super::block::insert_new_head_block(blockchain_db, Some(forks_db), dal_block)?;
                 } else {
-                    // Insert block in blockchain
-                    blockchain_db.write(|db| {
-                        db.insert(dal_block.block.number(), dal_block);
-                    })?;
+                    super::block::insert_new_head_block(blockchain_db, None, dal_block)?;
                 }
             }
             BlocksDBsWriteQuery::RevertBlock(dal_block) => {
                 trace!("BlocksDBsWriteQuery::WriteBlock...");
-                // Remove block in blockchain
-                blockchain_db.write(|db| {
-                    db.remove(&dal_block.block.number());
-                })?;
+                super::block::remove_block(blockchain_db, dal_block.block.number())?;
                 trace!("BlocksDBsWriteQuery::WriteBlock...finish");
             }
         }
diff --git a/lib/modules/blockchain/blockchain-dal/src/writers/transaction.rs b/lib/modules/blockchain/blockchain-dal/src/writers/transaction.rs
index a9229cd85b8b952b9516b0927fec98cb7697e943..8714a46df2d23c85c1dd8e7ed7fbb8ac089dc08a 100644
--- a/lib/modules/blockchain/blockchain-dal/src/writers/transaction.rs
+++ b/lib/modules/blockchain/blockchain-dal/src/writers/transaction.rs
@@ -20,7 +20,7 @@ use crate::entities::sources::{SourceAmount, UTXOV10};
 use crate::*;
 use dubp_indexes::sindex::{SourceUniqueIdV10, UniqueIdUTXOv10};
 
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug)]
 /// Transaction error
 pub enum TxError {
     /// UnkonwError
diff --git a/lib/modules/blockchain/blockchain/Cargo.toml b/lib/modules/blockchain/blockchain/Cargo.toml
index 14fdd28139c0969a21239dae704b2ecbb1b0d167..a07b632f8f7c41defda60151e1a5cfddab56c19a 100644
--- a/lib/modules/blockchain/blockchain/Cargo.toml
+++ b/lib/modules/blockchain/blockchain/Cargo.toml
@@ -29,7 +29,7 @@ json-pest-parser = { path = "../../../tools/json-pest-parser" }
 log = "0.4.*"
 num_cpus = "1.10.*"
 pbr = "1.0.*"
-prettytable-rs = "0.8.0"
+prettytable-rs = "0.8.*"
 rayon = "1.0.3"
 serde = "1.0.*"
 serde_json = "1.0.*"
diff --git a/lib/modules/blockchain/blockchain/src/dbex.rs b/lib/modules/blockchain/blockchain/src/dbex.rs
index 1465341e92139ef8426145ca5061a63e118f900d..76054bc8e9377b97c55f228f7266c3f968ea3ee9 100644
--- a/lib/modules/blockchain/blockchain/src/dbex.rs
+++ b/lib/modules/blockchain/blockchain/src/dbex.rs
@@ -94,7 +94,7 @@ pub fn dbex_bc(profile_path: PathBuf, _csv: bool, _query: DbExBcQuery) -> Result
     // Open databases
     let load_dbs_begin = SystemTime::now();
     let blocks_db = BlocksV10DBs::open(Some(&db_path));
-    //let forks_dbs = ForksDBs::open(Some(&db_path));
+    let forks_dbs = ForksDBs::open(Some(&db_path));
     let wot_databases = WotsV10DBs::open(Some(&db_path));
 
     let load_dbs_duration = SystemTime::now()
@@ -107,7 +107,7 @@ pub fn dbex_bc(profile_path: PathBuf, _csv: bool, _query: DbExBcQuery) -> Result
     );
 
     if let Some(current_blockstamp) =
-        durs_blockchain_dal::readers::block::get_current_blockstamp(&blocks_db)?
+        durs_blockchain_dal::readers::fork_tree::get_current_blockstamp(&forks_dbs)?
     {
         println!("Current block: #{}.", current_blockstamp);
         if let Some(current_block) = durs_blockchain_dal::readers::block::get_block(
@@ -294,8 +294,9 @@ pub fn dbex_wot(profile_path: PathBuf, csv: bool, query: &DbExWotQuery) {
         .expect("Fail to read IdentitiesDB !");
 
     // Open wot db
-    let wot_db = BinDB::File(
-        open_file_db::<RustyWebOfTrust>(&db_path, "wot.db").expect("Fail to open WotDB !"),
+    let wot_db = BinFreeStructDb::File(
+        open_free_struct_file_db::<RustyWebOfTrust>(&db_path, "wot.db")
+            .expect("Fail to open WotDB !"),
     );
 
     // Print wot blockstamp
@@ -365,19 +366,23 @@ pub fn dbex_wot(profile_path: PathBuf, csv: bool, query: &DbExWotQuery) {
         }
         DbExWotQuery::ExpireMembers(ref reverse) => {
             // Open blockchain database
-            let blockchain_db = open_file_db::<LocalBlockchainV10Datas>(&db_path, "blockchain.db")
-                .expect("Fail to open blockchain db");
+            let blockchain_db = LocalBcDbV10::open(
+                Some(&db_path),
+                durs_blockchain_dal::constants::LOCAL_BLOCKCHAIN_COLLECTION_NAME,
+            )
+            .expect("Fail to open LocalBcDbV10");
             // Get blocks_times
-            let (current_bc_time, blocks_times): (u64, HashMap<BlockNumber, u64>) = blockchain_db
-                .read(|db| {
-                    (
-                        db[&BlockNumber(db.len() as u32 - 1)].block.common_time(),
-                        db.iter()
-                            .map(|(block_id, dal_block)| (*block_id, dal_block.block.common_time()))
-                            .collect(),
-                    )
-                })
-                .expect("Fail to read blockchain db");
+            let all_blocks = durs_blockchain_dal::readers::block::get_blocks_in_local_blockchain(
+                &blockchain_db,
+                BlockNumber(0),
+                10_000_000,
+            )
+            .expect("Fail to get all blocks");
+            let current_bc_time = all_blocks.last().expect("empty blockchain").common_time();
+            let blocks_times: HashMap<BlockNumber, u64> = all_blocks
+                .iter()
+                .map(|block| (block.number(), block.common_time()))
+                .collect();
             // Get expire_dates
             let min_created_ms_time = current_bc_time - currency_params.ms_validity;
             let mut expire_dates: Vec<(NodeId, u64)> = wot_databases
diff --git a/lib/modules/blockchain/blockchain/src/dubp/apply/mod.rs b/lib/modules/blockchain/blockchain/src/dubp/apply/mod.rs
index 1ecda1f812ac0ded6050229a7a8a7ab2b190f716..4e2a839b35d95c725ee856d4d1050ef7fc368b07 100644
--- a/lib/modules/blockchain/blockchain/src/dubp/apply/mod.rs
+++ b/lib/modules/blockchain/blockchain/src/dubp/apply/mod.rs
@@ -23,7 +23,7 @@ use dup_crypto::keys::*;
 use durs_blockchain_dal::entities::block::DALBlock;
 use durs_blockchain_dal::entities::sources::SourceAmount;
 use durs_blockchain_dal::writers::requests::*;
-use durs_blockchain_dal::BinDB;
+use durs_blockchain_dal::BinFreeStructDb;
 use durs_common_tools::fatal_error;
 use durs_wot::data::NewLinkResult;
 use durs_wot::{NodeId, WebOfTrust};
@@ -49,7 +49,7 @@ pub enum ApplyValidBlockError {
 pub fn apply_valid_block<W: WebOfTrust>(
     block: BlockDocument,
     wot_index: &mut HashMap<PubKey, NodeId>,
-    wot_db: &BinDB<W>,
+    wot_db: &BinFreeStructDb<W>,
     expire_certs: &HashMap<(NodeId, NodeId), BlockNumber>,
 ) -> Result<ValidBlockApplyReqs, ApplyValidBlockError> {
     match block {
@@ -62,7 +62,7 @@ pub fn apply_valid_block<W: WebOfTrust>(
 pub fn apply_valid_block_v10<W: WebOfTrust>(
     mut block: BlockDocumentV10,
     wot_index: &mut HashMap<PubKey, NodeId>,
-    wot_db: &BinDB<W>,
+    wot_db: &BinFreeStructDb<W>,
     expire_certs: &HashMap<(NodeId, NodeId), BlockNumber>,
 ) -> Result<ValidBlockApplyReqs, ApplyValidBlockError> {
     debug!(
diff --git a/lib/modules/blockchain/blockchain/src/dubp/check/mod.rs b/lib/modules/blockchain/blockchain/src/dubp/check/mod.rs
index 4ad8c2698152135cf03d980312cf0921a1ed1988..81b8c276dfa471f6db5cb468aac15c7af226439f 100644
--- a/lib/modules/blockchain/blockchain/src/dubp/check/mod.rs
+++ b/lib/modules/blockchain/blockchain/src/dubp/check/mod.rs
@@ -34,10 +34,10 @@ pub enum InvalidBlockError {
 
 pub fn verify_block_validity<W: WebOfTrust>(
     block: &BlockDocument,
-    blockchain_db: &BinDB<LocalBlockchainV10Datas>,
-    _certs_db: &BinDB<CertsExpirV10Datas>,
+    blockchain_db: &LocalBcDbV10,
+    _certs_db: &BinFreeStructDb<CertsExpirV10Datas>,
     _wot_index: &HashMap<PubKey, NodeId>,
-    _wot_db: &BinDB<W>,
+    _wot_db: &BinFreeStructDb<W>,
 ) -> Result<(), BlockError> {
     // Rules that do not concern genesis block
     if block.number().0 > 0 {
diff --git a/lib/modules/blockchain/blockchain/src/dubp/mod.rs b/lib/modules/blockchain/blockchain/src/dubp/mod.rs
index 5023e3f9425160a017438cf93e892e68b44220b3..7d1e983304afd3ae0b2f2bc659e07b7d8c14f8f3 100644
--- a/lib/modules/blockchain/blockchain/src/dubp/mod.rs
+++ b/lib/modules/blockchain/blockchain/src/dubp/mod.rs
@@ -35,7 +35,7 @@ pub enum CheckAndApplyBlockReturn {
     OrphanBlock,
 }
 
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug)]
 pub enum BlockError {
     AlreadyHaveBlock,
     BlockOrOutForkWindow,
diff --git a/lib/modules/blockchain/blockchain/src/dunp/receiver.rs b/lib/modules/blockchain/blockchain/src/dunp/receiver.rs
index 3fb479a61e0cf72f71b5a52ce602c3c8a99534c3..90efc85c9a7b8d725bd4b44c87170d7c71626d64 100644
--- a/lib/modules/blockchain/blockchain/src/dunp/receiver.rs
+++ b/lib/modules/blockchain/blockchain/src/dunp/receiver.rs
@@ -53,7 +53,7 @@ pub fn receive_blocks(bc: &mut BlockchainModule, blocks: Vec<BlockDocument>) {
                     // Apply db requests
                     bc_db_query
                         .apply(
-                            &bc.blocks_databases.blockchain_db,
+                            &mut bc.blocks_databases.blockchain_db,
                             &bc.forks_dbs,
                             unwrap!(bc.currency_params).fork_window_size,
                             None,
diff --git a/lib/modules/blockchain/blockchain/src/fork/fork_algo.rs b/lib/modules/blockchain/blockchain/src/fork/fork_algo.rs
index 789b05eac21e58bb5f21282ec9de0b43e8dd5d72..0f7eb5bb2faa8d4a3bc7d7a4419c342c47beb9ea 100644
--- a/lib/modules/blockchain/blockchain/src/fork/fork_algo.rs
+++ b/lib/modules/blockchain/blockchain/src/fork/fork_algo.rs
@@ -110,7 +110,7 @@ mod tests {
         let fork_window_size = *dubp_currency_params::constants::DEFAULT_FORK_WINDOW_SIZE;
 
         // Open empty databases in memory mode
-        let bc_dbs = BlocksV10DBs::open(None);
+        let mut bc_dbs = BlocksV10DBs::open(None);
         let forks_dbs = ForksDBs::open(None);
 
         // Begin with no invalid blocks
@@ -126,8 +126,8 @@ mod tests {
         // Insert mock blocks in forks_dbs
         for block in &main_branch {
             durs_blockchain_dal::writers::block::insert_new_head_block(
-                &bc_dbs.blockchain_db,
-                &forks_dbs,
+                &mut bc_dbs.blockchain_db,
+                Some(&forks_dbs),
                 DALBlock {
                     block: block.clone(),
                     expire_certs: None,
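For test authors: a hedged sketch of the new insert_new_head_block call shape shown in this hunk. Only the argument order and the `&mut` / `Option` changes are taken from the patch; the `Result` return type and the content of `dal_block` (built as in the test above) are assumptions:

    // In-memory databases, as in the test above.
    let mut bc_dbs = BlocksV10DBs::open(None);
    let forks_dbs = ForksDBs::open(None);

    // The blockchain db is now taken by mutable reference and the forks DBs
    // become optional; passing None skips fork-tree bookkeeping (assumption).
    durs_blockchain_dal::writers::block::insert_new_head_block(
        &mut bc_dbs.blockchain_db,
        Some(&forks_dbs),
        dal_block, // a DALBlock built as in the loop above
    )
    .expect("Fail to insert head block"); // Result return type assumed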
diff --git a/lib/modules/blockchain/blockchain/src/fork/revert_block.rs b/lib/modules/blockchain/blockchain/src/fork/revert_block.rs
index 2f1da648b112d54b38635fa6ead7dc2435d38ee2..f9ca988c889db26c3c749450a1613b5e2c8b9b44 100644
--- a/lib/modules/blockchain/blockchain/src/fork/revert_block.rs
+++ b/lib/modules/blockchain/blockchain/src/fork/revert_block.rs
@@ -25,7 +25,7 @@ use durs_blockchain_dal::entities::block::DALBlock;
 use durs_blockchain_dal::entities::sources::SourceAmount;
 use durs_blockchain_dal::writers::requests::*;
 use durs_blockchain_dal::writers::transaction::DALTxV10;
-use durs_blockchain_dal::{BinDB, DALError, TxV10Datas};
+use durs_blockchain_dal::{BinFreeStructDb, DALError, TxV10Datas};
 use durs_common_tools::fatal_error;
 use durs_wot::data::{NewLinkResult, RemLinkResult};
 use durs_wot::{NodeId, WebOfTrust};
@@ -41,7 +41,7 @@ pub struct ValidBlockRevertReqs {
     pub currency_queries: Vec<CurrencyDBsWriteQuery>,
 }
 
-#[derive(Debug, Copy, Clone)]
+#[derive(Debug)]
 /// RevertValidBlockError
 pub enum RevertValidBlockError {
     ExcludeUnknowNodeId(),
@@ -58,8 +58,8 @@ impl From<DALError> for RevertValidBlockError {
 pub fn revert_block<W: WebOfTrust>(
     dal_block: DALBlock,
     wot_index: &mut HashMap<PubKey, NodeId>,
-    wot_db: &BinDB<W>,
-    txs_db: &BinDB<TxV10Datas>,
+    wot_db: &BinFreeStructDb<W>,
+    txs_db: &BinFreeStructDb<TxV10Datas>,
 ) -> Result<ValidBlockRevertReqs, RevertValidBlockError> {
     match dal_block.block {
         BlockDocument::V10(block_v10) => revert_block_v10(
@@ -76,8 +76,8 @@ pub fn revert_block_v10<W: WebOfTrust>(
     mut block: BlockDocumentV10,
     expire_certs: HashMap<(NodeId, NodeId), BlockNumber>,
     wot_index: &mut HashMap<PubKey, NodeId>,
-    wot_db: &BinDB<W>,
-    txs_db: &BinDB<TxV10Datas>,
+    wot_db: &BinFreeStructDb<W>,
+    txs_db: &BinFreeStructDb<TxV10Datas>,
 ) -> Result<ValidBlockRevertReqs, RevertValidBlockError> {
     // Get transactions
     let dal_txs: Vec<DALTxV10> = block
diff --git a/lib/modules/blockchain/blockchain/src/fork/rollback.rs b/lib/modules/blockchain/blockchain/src/fork/rollback.rs
index a4bbf7633c0413ca27e61440f1be7a3fb3bd992a..5ba4bcdbbee4ba3465e7a0fb313bd4b426dbb7da 100644
--- a/lib/modules/blockchain/blockchain/src/fork/rollback.rs
+++ b/lib/modules/blockchain/blockchain/src/fork/rollback.rs
@@ -59,7 +59,7 @@ pub fn apply_rollback(bc: &mut BlockchainModule, new_bc_branch: Vec<Blockstamp>)
             // Apply db requests
             block_query
                 .apply(
-                    &bc.blocks_databases.blockchain_db,
+                    &mut bc.blocks_databases.blockchain_db,
                     &bc.forks_dbs,
                     unwrap!(bc.currency_params).fork_window_size,
                     None,
@@ -99,7 +99,7 @@ pub fn apply_rollback(bc: &mut BlockchainModule, new_bc_branch: Vec<Blockstamp>)
                 // Apply db requests
                 bc_db_query
                     .apply(
-                        &bc.blocks_databases.blockchain_db,
+                        &mut bc.blocks_databases.blockchain_db,
                         &bc.forks_dbs,
                         unwrap!(bc.currency_params).fork_window_size,
                         None,
diff --git a/lib/modules/blockchain/blockchain/src/fork/stackable_blocks.rs b/lib/modules/blockchain/blockchain/src/fork/stackable_blocks.rs
index 8f0e348b73958efcbf33f909c77053292868ae34..f817f2e7be638e8c4cbf38e7b6cb2f9c7b22f3ad 100644
--- a/lib/modules/blockchain/blockchain/src/fork/stackable_blocks.rs
+++ b/lib/modules/blockchain/blockchain/src/fork/stackable_blocks.rs
@@ -47,7 +47,7 @@ pub fn apply_stackable_blocks(bc: &mut BlockchainModule) {
                     // Apply db requests
                     bc_db_query
                         .apply(
-                            &bc.blocks_databases.blockchain_db,
+                            &mut bc.blocks_databases.blockchain_db,
                             &bc.forks_dbs,
                             unwrap!(bc.currency_params).fork_window_size,
                             None,
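The `&mut bc.blocks_databases.blockchain_db` change above recurs in receiver.rs, rollback.rs, stackable_blocks.rs and, further below, in the sync blocks worker. The common call shape after this patch looks like the sketch below; everything outside the arguments shown in the hunks is an assumption, and error handling is elided:

    bc_db_query.apply(
        &mut bc.blocks_databases.blockchain_db, // write access now needs &mut (presumably rkv write transactions)
        &bc.forks_dbs,
        unwrap!(bc.currency_params).fork_window_size,
        None, // no target blockstamp outside of sync (assumption)
    )
    // .expect(...) as in each call site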
diff --git a/lib/modules/blockchain/blockchain/src/lib.rs b/lib/modules/blockchain/blockchain/src/lib.rs
index 9662998b95d31a07aeccf7dfb90882da463f5417..44f746c45d8c65973d698327c2f984943caaa5cc 100644
--- a/lib/modules/blockchain/blockchain/src/lib.rs
+++ b/lib/modules/blockchain/blockchain/src/lib.rs
@@ -18,7 +18,6 @@
 #![allow(clippy::large_enum_variant)]
 #![deny(
     missing_docs,
-    missing_debug_implementations,
     missing_copy_implementations,
     trivial_casts,
     trivial_numeric_casts,
@@ -89,7 +88,6 @@ pub static MAX_BLOCKS_REQUEST: &u32 = &500;
 pub static DISTANCE_CALCULATOR: &RustyDistanceCalculator = &RustyDistanceCalculator {};
 
 /// Blockchain Module
-#[derive(Debug)]
 pub struct BlockchainModule {
     /// Router sender
     pub router_sender: mpsc::Sender<RouterThreadMessage<DursMsg>>,
@@ -197,7 +195,7 @@ impl BlockchainModule {
 
         // Get current blockstamp
         let current_blockstamp =
-            durs_blockchain_dal::readers::block::get_current_blockstamp(&blocks_databases)
+            durs_blockchain_dal::readers::fork_tree::get_current_blockstamp(&forks_dbs)
                 .expect("Fatal error : fail to read Blockchain DB !")
                 .unwrap_or_default();
 
diff --git a/lib/modules/blockchain/blockchain/src/sync/apply/blocks_worker.rs b/lib/modules/blockchain/blockchain/src/sync/apply/blocks_worker.rs
index bdf091a89c313369038503fa406274e64905b38f..5f5737b843d9e4e92d517c15d0a162a5442cf9b1 100644
--- a/lib/modules/blockchain/blockchain/src/sync/apply/blocks_worker.rs
+++ b/lib/modules/blockchain/blockchain/src/sync/apply/blocks_worker.rs
@@ -21,7 +21,7 @@ pub fn execute(
     pool: &ThreadPool,
     sender_sync_thread: mpsc::Sender<MessForSyncThread>,
     recv: mpsc::Receiver<SyncJobsMess>,
-    blocks_dbs: BlocksV10DBs,
+    mut blocks_dbs: BlocksV10DBs,
     forks_db: ForksDBs,
     target_blockstamp: Blockstamp,
     mut apply_pb: ProgressBar<std::io::Stdout>,
@@ -47,7 +47,7 @@ pub fn execute(
 
                         // Apply db request
                         req.apply(
-                            &blocks_dbs.blockchain_db,
+                            &mut blocks_dbs.blockchain_db,
                             &forks_db,
                             fork_window_size,
                             Some(target_blockstamp),
diff --git a/lib/modules/blockchain/blockchain/src/sync/apply/mod.rs b/lib/modules/blockchain/blockchain/src/sync/apply/mod.rs
index 449301d3b0b2207dfda37c4179cc4f1c21335ba0..cbcdfac7dcdc49d5ef48901068fe14abc161d517 100644
--- a/lib/modules/blockchain/blockchain/src/sync/apply/mod.rs
+++ b/lib/modules/blockchain/blockchain/src/sync/apply/mod.rs
@@ -27,7 +27,7 @@ use dubp_common_doc::{BlockNumber, Blockstamp};
 use dubp_currency_params::{CurrencyName, CurrencyParameters};
 use dup_crypto::keys::PubKey;
 use durs_blockchain_dal::writers::requests::WotsDBsWriteQuery;
-use durs_blockchain_dal::{BinDB, CertsExpirV10Datas, WotsV10DBs};
+use durs_blockchain_dal::{BinFreeStructDb, CertsExpirV10Datas, WotsV10DBs};
 use durs_common_tools::fatal_error;
 use durs_network_documents::url::Url;
 use durs_wot::data::rusty::RustyWebOfTrust;
@@ -59,7 +59,7 @@ pub struct BlockApplicator {
     // databases
     pub wot_index: HashMap<PubKey, NodeId>,
     pub wot_databases: WotsV10DBs,
-    pub certs_db: BinDB<CertsExpirV10Datas>,
+    pub certs_db: BinFreeStructDb<CertsExpirV10Datas>,
     // time measurement
     pub wait_begin: SystemTime,
     pub all_wait_duration: Duration,
diff --git a/lib/modules/blockchain/blockchain/src/sync/download/json_reader_worker.rs b/lib/modules/blockchain/blockchain/src/sync/download/json_reader_worker.rs
index 90520132d77d95e4246e47cf6f00356295c11466..6fa6caabff79d60402f29e8c179ed0395dcd329d 100644
--- a/lib/modules/blockchain/blockchain/src/sync/download/json_reader_worker.rs
+++ b/lib/modules/blockchain/blockchain/src/sync/download/json_reader_worker.rs
@@ -96,9 +96,9 @@ pub fn json_reader_worker(
         // Get current local blockstamp
         debug!("Get local current blockstamp...");
         let db_path = durs_conf::get_blockchain_db_path(profile_path);
-        let blocks_databases = BlocksV10DBs::open(Some(&db_path));
+        let forks_dbs = ForksDBs::open(Some(&db_path));
         let current_blockstamp: Blockstamp =
-            durs_blockchain_dal::readers::block::get_current_blockstamp(&blocks_databases)
+            durs_blockchain_dal::readers::fork_tree::get_current_blockstamp(&forks_dbs)
                 .expect("ForksV10DB : RustBreakError !")
                 .unwrap_or_default();
         info!("Local current blockstamp = {}", current_blockstamp);
diff --git a/lib/modules/blockchain/blockchain/src/sync/mod.rs b/lib/modules/blockchain/blockchain/src/sync/mod.rs
index d63ded78d8a739799abe916ff8c5c90de1352087..4ab476d234fa78734c81742099ed45282b1bc294 100644
--- a/lib/modules/blockchain/blockchain/src/sync/mod.rs
+++ b/lib/modules/blockchain/blockchain/src/sync/mod.rs
@@ -24,7 +24,7 @@ use dubp_common_doc::{BlockHash, BlockNumber};
 use dubp_currency_params::{CurrencyName, CurrencyParameters};
 use dup_crypto::keys::*;
 use durs_blockchain_dal::writers::requests::*;
-use durs_blockchain_dal::{open_memory_db, CertsExpirV10Datas};
+use durs_blockchain_dal::{open_free_struct_memory_db, CertsExpirV10Datas};
 use durs_common_tools::fatal_error;
 use durs_wot::NodeId;
 use failure::Fail;
@@ -186,7 +186,7 @@ pub fn local_sync<DC: DursConfTrait>(
     // Get local current blockstamp
     debug!("Get local current blockstamp...");
     let current_blockstamp: Blockstamp =
-        durs_blockchain_dal::readers::block::get_current_blockstamp(&blocks_dbs)
+        durs_blockchain_dal::readers::fork_tree::get_current_blockstamp(&forks_dbs)
             .expect("DALError : fail to get current blockstamp !")
             .unwrap_or_default();
     debug!("Success to get local current blockstamp.");
@@ -261,8 +261,9 @@ pub fn local_sync<DC: DursConfTrait>(
 
     // Open databases
     let dbs_path = durs_conf::get_blockchain_db_path(profile_path.clone());
-    let certs_db =
-        BinDB::Mem(open_memory_db::<CertsExpirV10Datas>().expect("Fail to create memory certs_db"));
+    let certs_db = BinFreeStructDb::Mem(
+        open_free_struct_memory_db::<CertsExpirV10Datas>().expect("Fail to create memory certs_db"),
+    );
 
     // Initialize the BlockApplicator
     let mut block_applicator = BlockApplicator {