Skip to content
Snippets Groups Projects
Commit bbff027a authored by Éloïs's avatar Éloïs
Browse files

Merge branch 'ref/cm-to-global' into 'dev'

[feat] server: create global cache

See merge request !1366
parents 8fb9eca8 6ce60364
No related branches found
No related tags found
1 merge request!1366[feat] server: create global cache
Showing
with 362 additions and 111 deletions
...@@ -103,6 +103,9 @@ name = "arrayvec" ...@@ -103,6 +103,9 @@ name = "arrayvec"
version = "0.5.2" version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
dependencies = [
"serde",
]
[[package]] [[package]]
name = "async-attributes" name = "async-attributes"
...@@ -282,6 +285,16 @@ dependencies = [ ...@@ -282,6 +285,16 @@ dependencies = [
"futures-micro", "futures-micro",
] ]
[[package]]
name = "async-rwlock"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "261803dcc39ba9e72760ba6e16d0199b1eef9fc44e81bffabbebb9f5aea3906c"
dependencies = [
"async-mutex",
"event-listener",
]
[[package]] [[package]]
name = "async-std" name = "async-std"
version = "1.6.5" version = "1.6.5"
...@@ -1154,12 +1167,14 @@ name = "duniter-bca" ...@@ -1154,12 +1167,14 @@ name = "duniter-bca"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"arrayvec",
"async-bincode", "async-bincode",
"async_io_stream", "async_io_stream",
"bincode", "bincode",
"dubp", "dubp",
"duniter-bca-types", "duniter-bca-types",
"duniter-dbs", "duniter-dbs",
"duniter-global",
"duniter-gva-db", "duniter-gva-db",
"duniter-gva-dbs-reader", "duniter-gva-dbs-reader",
"duniter-mempools", "duniter-mempools",
...@@ -1176,6 +1191,7 @@ dependencies = [ ...@@ -1176,6 +1191,7 @@ dependencies = [
name = "duniter-bca-types" name = "duniter-bca-types"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"arrayvec",
"bincode", "bincode",
"dubp", "dubp",
"serde", "serde",
...@@ -1249,13 +1265,28 @@ dependencies = [ ...@@ -1249,13 +1265,28 @@ dependencies = [
"chrono", "chrono",
"dubp", "dubp",
"duniter-dbs", "duniter-dbs",
"duniter-global",
"fast-threadpool", "fast-threadpool",
"flume",
"log", "log",
"maplit", "maplit",
"resiter", "resiter",
"serde_json", "serde_json",
] ]
[[package]]
name = "duniter-global"
version = "1.8.1"
dependencies = [
"async-rwlock",
"dubp",
"duniter-dbs",
"flume",
"mockall",
"once_cell",
"tokio",
]
[[package]] [[package]]
name = "duniter-gva" name = "duniter-gva"
version = "0.1.0" version = "0.1.0"
...@@ -1270,6 +1301,7 @@ dependencies = [ ...@@ -1270,6 +1301,7 @@ dependencies = [
"duniter-bca", "duniter-bca",
"duniter-conf", "duniter-conf",
"duniter-dbs", "duniter-dbs",
"duniter-global",
"duniter-gva-db", "duniter-gva-db",
"duniter-gva-dbs-reader", "duniter-gva-dbs-reader",
"duniter-gva-gql", "duniter-gva-gql",
...@@ -1315,6 +1347,7 @@ dependencies = [ ...@@ -1315,6 +1347,7 @@ dependencies = [
"anyhow", "anyhow",
"arrayvec", "arrayvec",
"dubp", "dubp",
"duniter-bca-types",
"duniter-dbs", "duniter-dbs",
"duniter-gva-db", "duniter-gva-db",
"maplit", "maplit",
...@@ -1336,6 +1369,7 @@ dependencies = [ ...@@ -1336,6 +1369,7 @@ dependencies = [
"duniter-bc-reader", "duniter-bc-reader",
"duniter-conf", "duniter-conf",
"duniter-dbs", "duniter-dbs",
"duniter-global",
"duniter-gva-db", "duniter-gva-db",
"duniter-gva-dbs-reader", "duniter-gva-dbs-reader",
"duniter-mempools", "duniter-mempools",
...@@ -1425,8 +1459,10 @@ dependencies = [ ...@@ -1425,8 +1459,10 @@ dependencies = [
"dubp", "dubp",
"duniter-conf", "duniter-conf",
"duniter-dbs", "duniter-dbs",
"duniter-global",
"duniter-mempools", "duniter-mempools",
"fast-threadpool", "fast-threadpool",
"log",
"paste", "paste",
"tokio", "tokio",
] ]
...@@ -1442,6 +1478,7 @@ dependencies = [ ...@@ -1442,6 +1478,7 @@ dependencies = [
"duniter-conf", "duniter-conf",
"duniter-dbs", "duniter-dbs",
"duniter-dbs-write-ops", "duniter-dbs-write-ops",
"duniter-global",
"duniter-gva", "duniter-gva",
"duniter-mempools", "duniter-mempools",
"duniter-module", "duniter-module",
...@@ -1450,7 +1487,6 @@ dependencies = [ ...@@ -1450,7 +1487,6 @@ dependencies = [
"log", "log",
"paste", "paste",
"resiter", "resiter",
"tokio",
] ]
[[package]] [[package]]
......
...@@ -41,6 +41,7 @@ members = [ ...@@ -41,6 +41,7 @@ members = [
"rust-libs/duniter-mempools", "rust-libs/duniter-mempools",
"rust-libs/duniter-module", "rust-libs/duniter-module",
"rust-libs/duniter-server", "rust-libs/duniter-server",
"rust-libs/duniter-global",
"rust-libs/modules/gva", "rust-libs/modules/gva",
"rust-libs/modules/gva/bca", "rust-libs/modules/gva/bca",
"rust-libs/modules/gva/bca/types", "rust-libs/modules/gva/bca/types",
......
...@@ -142,6 +142,7 @@ declare_types! { ...@@ -142,6 +142,7 @@ declare_types! {
let server = this.borrow(&guard); let server = this.borrow(&guard);
server.server.get_self_endpoints() server.server.get_self_endpoints()
}.map(|endpoints| { }.map(|endpoints| {
log::info!("TMP DEBUG get_self_endpoints={:?}", endpoints);
let js_array = JsArray::new(&mut cx, endpoints.len() as u32); let js_array = JsArray::new(&mut cx, endpoints.len() as u32);
for (i, ep) in endpoints.iter().enumerate() { for (i, ep) in endpoints.iter().enumerate() {
let js_string = cx.string(ep); let js_string = cx.string(ep);
......
...@@ -119,7 +119,7 @@ fn migrate_inner( ...@@ -119,7 +119,7 @@ fn migrate_inner(
}) })
.expect("gva:apply_chunk: dbs pool disconnected"); .expect("gva:apply_chunk: dbs pool disconnected");
current = Some(duniter_dbs_write_ops::apply_block::apply_chunk( current = Some(duniter_dbs_write_ops::apply_block::apply_chunk(
bc_db, current, &dbs_pool, chunk, bc_db, current, &dbs_pool, chunk, None,
)?); )?);
gva_handle gva_handle
.join() .join()
......
...@@ -23,15 +23,7 @@ ...@@ -23,15 +23,7 @@
)] )]
use dubp::crypto::hashs::Hash; use dubp::crypto::hashs::Hash;
use duniter_dbs::{ use duniter_dbs::{databases::bc_v2::BcV2DbReadable, kv_typed::prelude::*, HashKeyV2};
databases::{bc_v2::BcV2DbReadable, cm_v1::CmV1DbReadable},
kv_typed::prelude::*,
BlockMetaV2, HashKeyV2,
};
pub fn get_current_block_meta<CmDb: CmV1DbReadable>(cm_db: &CmDb) -> KvResult<Option<BlockMetaV2>> {
cm_db.current_block_meta().get(&())
}
pub fn tx_exist<BcDb: BcV2DbReadable>(bc_db_ro: &BcDb, hash: Hash) -> KvResult<bool> { pub fn tx_exist<BcDb: BcV2DbReadable>(bc_db_ro: &BcDb, hash: Hash) -> KvResult<bool> {
Ok(bc_db_ro.txs_hashs().contains_key(&HashKeyV2(hash))?) Ok(bc_db_ro.txs_hashs().contains_key(&HashKeyV2(hash))?)
......
...@@ -15,7 +15,9 @@ path = "src/lib.rs" ...@@ -15,7 +15,9 @@ path = "src/lib.rs"
chrono = "0.4.19" chrono = "0.4.19"
dubp = { version = "0.50.0", features = ["duniter"] } dubp = { version = "0.50.0", features = ["duniter"] }
duniter-dbs = { path = "../duniter-dbs" } duniter-dbs = { path = "../duniter-dbs" }
duniter-global = { path = "../duniter-global" }
fast-threadpool = "0.2.3" fast-threadpool = "0.2.3"
flume = "0.10"
log = "0.4.11" log = "0.4.11"
resiter = "0.4.0" resiter = "0.4.0"
......
...@@ -20,11 +20,12 @@ pub fn apply_block( ...@@ -20,11 +20,12 @@ pub fn apply_block(
block: Arc<DubpBlockV10>, block: Arc<DubpBlockV10>,
current_opt: Option<BlockMetaV2>, current_opt: Option<BlockMetaV2>,
dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>, dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
global_sender: &flume::Sender<GlobalBackGroundTaskMsg>,
throw_chainability: bool, throw_chainability: bool,
) -> KvResult<BlockMetaV2> { ) -> KvResult<BlockMetaV2> {
if let Some(current) = current_opt { if let Some(current) = current_opt {
if block.number().0 == current.number + 1 { if block.number().0 == current.number + 1 {
apply_block_inner(bc_db, dbs_pool, block) apply_block_inner(bc_db, dbs_pool, block, global_sender)
} else if throw_chainability { } else if throw_chainability {
Err(KvError::Custom( Err(KvError::Custom(
format!( format!(
...@@ -38,7 +39,7 @@ pub fn apply_block( ...@@ -38,7 +39,7 @@ pub fn apply_block(
Ok(current) Ok(current)
} }
} else if block.number() == BlockNumber(0) { } else if block.number() == BlockNumber(0) {
apply_block_inner(bc_db, dbs_pool, block) apply_block_inner(bc_db, dbs_pool, block, global_sender)
} else { } else {
Err(KvError::Custom( Err(KvError::Custom(
"Try to apply non genesis block on empty blockchain".into(), "Try to apply non genesis block on empty blockchain".into(),
...@@ -52,9 +53,10 @@ pub fn apply_chunk( ...@@ -52,9 +53,10 @@ pub fn apply_chunk(
current_opt: Option<BlockMetaV2>, current_opt: Option<BlockMetaV2>,
dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>, dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
blocks: Arc<[DubpBlockV10]>, blocks: Arc<[DubpBlockV10]>,
global_sender: Option<&flume::Sender<GlobalBackGroundTaskMsg>>,
) -> KvResult<BlockMetaV2> { ) -> KvResult<BlockMetaV2> {
verify_chunk_chainability(current_opt, &blocks)?; verify_chunk_chainability(current_opt, &blocks)?;
apply_chunk_inner(bc_db, dbs_pool, blocks) apply_chunk_inner(bc_db, dbs_pool, blocks, global_sender)
} }
fn verify_chunk_chainability( fn verify_chunk_chainability(
...@@ -105,17 +107,13 @@ fn apply_block_inner( ...@@ -105,17 +107,13 @@ fn apply_block_inner(
bc_db: &BcV2Db<FileBackend>, bc_db: &BcV2Db<FileBackend>,
dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>, dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
block: Arc<DubpBlockV10>, block: Arc<DubpBlockV10>,
global_sender: &flume::Sender<GlobalBackGroundTaskMsg>,
) -> KvResult<BlockMetaV2> { ) -> KvResult<BlockMetaV2> {
let block_for_cm = Arc::clone(&block); let block_for_cm = Arc::clone(&block);
let block_for_txs_mp = Arc::clone(&block); let block_for_txs_mp = Arc::clone(&block);
// Cm // Cm
let cm_handle = dbs_pool crate::cm::update_current_meta(&block_for_cm, &global_sender);
.launch(move |dbs| {
crate::cm::apply_block(&block_for_cm, &dbs.cm_db)?;
Ok::<_, KvError>(())
})
.expect("dbs pool disconnected");
//TxsMp //TxsMp
let txs_mp_handle = dbs_pool let txs_mp_handle = dbs_pool
...@@ -128,7 +126,6 @@ fn apply_block_inner( ...@@ -128,7 +126,6 @@ fn apply_block_inner(
// Bc // Bc
let new_current = crate::bc::apply_block(bc_db, &block)?; let new_current = crate::bc::apply_block(bc_db, &block)?;
cm_handle.join().expect("dbs pool disconnected")?;
txs_mp_handle.join().expect("dbs pool disconnected")?; txs_mp_handle.join().expect("dbs pool disconnected")?;
Ok(new_current) Ok(new_current)
...@@ -138,18 +135,16 @@ fn apply_chunk_inner( ...@@ -138,18 +135,16 @@ fn apply_chunk_inner(
bc_db: &BcV2Db<FileBackend>, bc_db: &BcV2Db<FileBackend>,
dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>, dbs_pool: &fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
blocks: Arc<[DubpBlockV10]>, blocks: Arc<[DubpBlockV10]>,
global_sender: Option<&flume::Sender<GlobalBackGroundTaskMsg>>,
) -> KvResult<BlockMetaV2> { ) -> KvResult<BlockMetaV2> {
let blocks_len = blocks.len(); let blocks_len = blocks.len();
let blocks_for_cm = Arc::clone(&blocks);
let blocks_for_txs_mp = Arc::clone(&blocks); let blocks_for_txs_mp = Arc::clone(&blocks);
// Cm // Cm
let cm_handle = dbs_pool if let Some(global_sender) = global_sender {
.launch(move |dbs| { let chunk_len = blocks.len();
let chunk_len = blocks_for_cm.len(); crate::cm::update_current_meta(&&blocks.deref()[chunk_len - 1], &global_sender);
crate::cm::apply_block(&blocks_for_cm.deref()[chunk_len - 1], &dbs.cm_db) }
})
.expect("dbs pool disconnected");
//TxsMp //TxsMp
//log::info!("apply_chunk: launch txs_mp job..."); //log::info!("apply_chunk: launch txs_mp job...");
...@@ -169,7 +164,6 @@ fn apply_chunk_inner( ...@@ -169,7 +164,6 @@ fn apply_chunk_inner(
} }
let current_block = crate::bc::apply_block(bc_db, &blocks[blocks_len - 1])?; let current_block = crate::bc::apply_block(bc_db, &blocks[blocks_len - 1])?;
cm_handle.join().expect("dbs pool disconnected")?;
txs_mp_handle txs_mp_handle
.join() .join()
.expect("txs_mp_recv: dbs pool disconnected")?; .expect("txs_mp_recv: dbs pool disconnected")?;
......
...@@ -14,27 +14,38 @@ ...@@ -14,27 +14,38 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>. // along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::*; use crate::*;
use duniter_dbs::databases::bc_v2::BcV2DbReadable;
use duniter_dbs::BlockDbV2;
pub fn init(bc_db: &BcV2Db<FileBackend>, cm_db: &CmV1Db<MemSingleton>) -> KvResult<()> { #[inline(always)]
if let Some(current_block_meta) = bc_db pub(crate) fn update_current_meta(
.blocks_meta() block: &DubpBlockV10,
.iter_rev(.., |it| it.values().next_res())? global_sender: &flume::Sender<GlobalBackGroundTaskMsg>,
{ ) {
cm_db let current_block_meta = block_to_block_meta(block);
.current_block_meta_write() global_sender
.upsert((), current_block_meta) .send(GlobalBackGroundTaskMsg::NewCurrentBlock(current_block_meta))
} else { .expect("global task disconnected");
Ok(())
}
} }
pub fn apply_block(block: &DubpBlockV10, cm_db: &CmV1Db<MemSingleton>) -> KvResult<()> { fn block_to_block_meta(block: &DubpBlockV10) -> BlockMetaV2 {
let block_meta = BlockMetaV2::from(block); BlockMetaV2 {
cm_db.current_block_meta_write().upsert((), block_meta)?; version: 10,
cm_db number: block.number().0,
.current_block_write() hash: block.hash().0,
.upsert((), BlockDbV2(block.clone()))?; signature: block.signature(),
Ok(()) inner_hash: block.inner_hash(),
previous_hash: block.previous_hash(),
issuer: block.issuer(),
previous_issuer: dubp::crypto::keys::ed25519::PublicKey::default(),
time: block.local_time(),
pow_min: block.pow_min() as u32,
members_count: block.members_count() as u64,
issuers_count: block.issuers_count() as u32,
issuers_frame: block.issuers_frame() as u64,
issuers_frame_var: 0,
median_time: block.common_time(),
nonce: block.nonce(),
monetary_mass: block.monetary_mass(),
unit_base: block.unit_base() as u32,
dividend: block.dividend(),
}
} }
...@@ -40,13 +40,13 @@ use dubp::wallet::prelude::*; ...@@ -40,13 +40,13 @@ use dubp::wallet::prelude::*;
use duniter_dbs::{ use duniter_dbs::{
databases::{ databases::{
bc_v2::BcV2Db, bc_v2::BcV2Db,
cm_v1::{CmV1Db, CmV1DbWritable},
txs_mp_v2::{TxsMpV2Db, TxsMpV2DbReadable, TxsMpV2DbWritable}, txs_mp_v2::{TxsMpV2Db, TxsMpV2DbReadable, TxsMpV2DbWritable},
}, },
kv_typed::prelude::*, kv_typed::prelude::*,
BlockMetaV2, FileBackend, HashKeyV2, PendingTxDbV2, PubKeyKeyV2, PubKeyValV2, SharedDbs, BlockMetaV2, FileBackend, HashKeyV2, PendingTxDbV2, PubKeyKeyV2, PubKeyValV2, SharedDbs,
SourceAmountValV2, UtxoValV2, WalletConditionsV2, SourceAmountValV2, UtxoValV2, WalletConditionsV2,
}; };
use duniter_global::GlobalBackGroundTaskMsg;
use resiter::filter_map::FilterMap; use resiter::filter_map::FilterMap;
use resiter::flatten::Flatten; use resiter::flatten::Flatten;
use resiter::map::Map; use resiter::map::Map;
......
...@@ -15,11 +15,4 @@ ...@@ -15,11 +15,4 @@
use crate::*; use crate::*;
db_schema!( db_schema!(CmV1, [["current_block", CurrentBlock, (), BlockDbV2],]);
CmV1,
[
["self_peer_old", SelfPeerOld, (), PeerCardDbV1],
["current_block_meta", CurrentBlockMeta, (), BlockMetaV2],
["current_block", CurrentBlock, (), BlockDbV2],
]
);
# duniter-global: in-memory global cache shared between duniter modules
# (current block meta / current UD, self peer card, self endpoints) and
# the shared tokio runtime accessor.
[package]
name = "duniter-global"
version = "1.8.1"
authors = ["librelois <elois@duniter.org>"]
license = "AGPL-3.0"
edition = "2018"

[dependencies]
# Async RwLock protecting the global caches (supports upgradable reads).
async-rwlock = "1.3.0"
dubp = { version = "0.50.0", features = ["duniter"] }
duniter-dbs = { path = "../duniter-dbs" }
# Channel used to talk to the global background task.
flume = "0.10"
# Only needed for the `mock` feature (test doubles of AsyncAccessor).
mockall = { version = "0.9", optional = true }
once_cell = "1.5"
tokio = { version = "1.2", features = ["io-util", "rt-multi-thread"] }

[features]
mock = ["mockall"]
// Copyright (C) 2020 Éloïs SANCHEZ.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![deny(
clippy::unwrap_used,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unstable_features,
unused_import_braces
)]
pub use tokio;
use async_rwlock::RwLock;
use dubp::wallet::prelude::SourceAmount;
use duniter_dbs::BlockMetaV2;
use once_cell::sync::OnceCell;
use std::ops::Deref;
/// Endpoints exposed by this node; written when modules are plugged,
/// read by the background task to answer `GetSelfEndpoints` requests.
pub static SELF_ENDPOINTS: RwLock<Option<Vec<String>>> = RwLock::new(None);
// Lazily-built shared tokio runtime (see `get_async_runtime`).
static ASYNC_RUNTIME: OnceCell<tokio::runtime::Runtime> = OnceCell::new();
// Cache of the current block meta + current UD, maintained by the
// global background task (`None` until `InitCurrentMeta` is received).
static CURRENT_META: RwLock<Option<CurrentMeta>> = RwLock::new(None);
// Legacy (duniter v1) self peer card, set via `SetSelfPeerOld`.
static SELF_PEER_OLD: RwLock<Option<duniter_dbs::PeerCardDbV1>> = RwLock::new(None);
/// Snapshot of chain-head data cached in memory for cheap async access.
#[derive(Clone, Copy, Debug, Default)]
pub struct CurrentMeta {
    /// Amount of the most recent revalued universal dividend.
    pub current_ud: SourceAmount,
    /// Metadata of the current (head) block.
    pub current_block_meta: BlockMetaV2,
}
/// Messages handled by the global background task
/// (see `start_global_background_task`).
#[derive(Clone, Debug)]
pub enum GlobalBackGroundTaskMsg {
    /// Seed the `CURRENT_META` cache from the databases at startup.
    InitCurrentMeta(CurrentMeta),
    /// A new block was applied: refresh the cached current block meta
    /// (and the current UD if the block carries a dividend).
    NewCurrentBlock(BlockMetaV2),
    /// Request the node's own endpoints; the reply is sent back on the
    /// provided one-shot-style channel.
    GetSelfEndpoints(flume::Sender<Option<Vec<String>>>),
    /// Update the cached legacy (duniter v1) self peer card.
    SetSelfPeerOld(duniter_dbs::PeerCardDbV1),
}
/// Spawns the global background task on the current tokio runtime.
///
/// The task owns all writes to the global caches (`CURRENT_META`,
/// `SELF_PEER_OLD`) and answers `GetSelfEndpoints` requests, processing
/// `GlobalBackGroundTaskMsg` messages from `recv` until every sender is
/// dropped.
pub async fn start_global_background_task(recv: flume::Receiver<GlobalBackGroundTaskMsg>) {
    tokio::spawn(async move {
        // Loop ends (and the task exits) once all senders are dropped.
        while let Ok(msg) = recv.recv_async().await {
            match msg {
                GlobalBackGroundTaskMsg::InitCurrentMeta(current_meta) => {
                    let mut write_guard = CURRENT_META.write().await;
                    write_guard.replace(current_meta);
                }
                GlobalBackGroundTaskMsg::NewCurrentBlock(current_block_meta) => {
                    // Upgradable read first: the previously cached current UD
                    // may be needed to build the new value before overwriting.
                    let upgradable_read_guard = CURRENT_META.upgradable_read().await;
                    let new_current_meta = if let Some(dividend) = current_block_meta.dividend {
                        // The new block creates a UD: it becomes the current UD.
                        CurrentMeta {
                            current_ud: dividend,
                            current_block_meta,
                        }
                    } else if let Some(current_meta) = upgradable_read_guard.deref() {
                        // No UD in this block: keep the cached current UD.
                        CurrentMeta {
                            current_ud: current_meta.current_ud,
                            current_block_meta,
                        }
                    } else {
                        // Cache never initialized: no known UD yet.
                        CurrentMeta {
                            current_ud: SourceAmount::ZERO,
                            current_block_meta,
                        }
                    };
                    let mut write_guard =
                        async_rwlock::RwLockUpgradableReadGuard::upgrade(upgradable_read_guard)
                            .await;
                    write_guard.replace(new_current_meta);
                }
                GlobalBackGroundTaskMsg::GetSelfEndpoints(sender) => {
                    let read_guard = SELF_ENDPOINTS.read().await;
                    // Best-effort reply: ignore the error if the requester is gone.
                    let _ = sender.send_async(read_guard.deref().clone()).await;
                }
                GlobalBackGroundTaskMsg::SetSelfPeerOld(self_peer_old) => {
                    let mut write_guard = SELF_PEER_OLD.write().await;
                    write_guard.replace(self_peer_old);
                }
            }
        }
    });
}
/// Returns the process-wide tokio runtime, building it on first use.
///
/// The runtime is stored in a `OnceCell`, so concurrent first calls
/// race safely and all callers observe the same instance.
pub fn get_async_runtime() -> &'static tokio::runtime::Runtime {
    ASYNC_RUNTIME.get_or_init(|| {
        let runtime = tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build();
        runtime.expect("fail to build tokio runtime")
    })
}
/// Zero-sized handle giving async read-only access to the global caches.
#[derive(Clone, Copy, Debug, Default)]
pub struct AsyncAccessor;

impl AsyncAccessor {
    /// Creates a new accessor (the type is zero-sized, so this is free).
    pub fn new() -> Self {
        AsyncAccessor
    }
    /// Applies `f` to the cached current meta, returning `None` when the
    /// cache has not been initialized yet.
    pub async fn get_current_meta<D: 'static, F: 'static + FnOnce(&CurrentMeta) -> D>(
        &self,
        f: F,
    ) -> Option<D> {
        let guard = CURRENT_META.read().await;
        (*guard).as_ref().map(f)
    }
    /// Applies `f` to the cached legacy self peer card, returning `None`
    /// when no peer card has been set yet.
    pub async fn get_self_peer_old<
        D: 'static,
        F: 'static + FnOnce(&duniter_dbs::PeerCardDbV1) -> D,
    >(
        &self,
        f: F,
    ) -> Option<D> {
        let guard = SELF_PEER_OLD.read().await;
        (*guard).as_ref().map(f)
    }
}
// Mock of `AsyncAccessor` for consumers' unit tests; only compiled when
// the optional `mock` feature is enabled. The generated type is named
// `MockAsyncAccessor` (mockall convention).
#[cfg(feature = "mock")]
mockall::mock! {
    pub AsyncAccessor {
        pub async fn get_current_meta<D: 'static, F: 'static + FnOnce(&CurrentMeta) -> D>(
            &self,
            f: F,
        ) -> Option<D>;
        pub async fn get_self_peer_old<
            D: 'static,
            F: 'static + FnOnce(&duniter_dbs::PeerCardDbV1) -> D,
        >(
            &self,
            f: F,
        ) -> Option<D>;
    }
}
...@@ -11,8 +11,10 @@ async-trait = "0.1.41" ...@@ -11,8 +11,10 @@ async-trait = "0.1.41"
dubp = { version = "0.50.0", features = ["duniter"] } dubp = { version = "0.50.0", features = ["duniter"] }
duniter-conf = { path = "../duniter-conf" } duniter-conf = { path = "../duniter-conf" }
duniter-dbs = { path = "../duniter-dbs" } duniter-dbs = { path = "../duniter-dbs" }
duniter-global = { path = "../duniter-global" }
duniter-mempools = { path = "../duniter-mempools" } duniter-mempools = { path = "../duniter-mempools" }
fast-threadpool = "0.2.3" fast-threadpool = "0.2.3"
log = "0.4"
[dev-dependencies] [dev-dependencies]
duniter-dbs = { path = "../duniter-dbs", features = ["mem"] } duniter-dbs = { path = "../duniter-dbs", features = ["mem"] }
......
...@@ -216,16 +216,8 @@ macro_rules! plug_duniter_modules { ...@@ -216,16 +216,8 @@ macro_rules! plug_duniter_modules {
all_endpoints.append(&mut endpoints); all_endpoints.append(&mut endpoints);
)* )*
let self_peer = duniter_dbs::PeerCardDbV1 { log::info!("TMP DEBUG SELF_ENDPOINTS={:?}", all_endpoints);
version: 10, duniter_global::SELF_ENDPOINTS.write().await.replace(all_endpoints);
currency,
endpoints: all_endpoints,
..Default::default()
};
use duniter_dbs::databases::cm_v1::CmV1DbWritable as _;
use duniter_dbs::kv_typed::prelude::DbCollectionRw as _;
dbs_pool.execute(|dbs| dbs.cm_db.self_peer_old_write().upsert((), self_peer)).await?.context("fail to save self peer card")?;
$( $(
let [<$M:snake _handle>] = tokio::spawn([<$M:snake>].start()); let [<$M:snake _handle>] = tokio::spawn([<$M:snake>].start());
......
...@@ -10,9 +10,10 @@ anyhow = "1.0.34" ...@@ -10,9 +10,10 @@ anyhow = "1.0.34"
cfg-if = "1.0.0" cfg-if = "1.0.0"
dubp = { version = "0.50.0", features = ["duniter"] } dubp = { version = "0.50.0", features = ["duniter"] }
duniter-conf = { path = "../duniter-conf" } duniter-conf = { path = "../duniter-conf" }
duniter-dbs = { path = "../duniter-dbs" }
duniter-bc-reader = { path = "../duniter-bc-reader" } duniter-bc-reader = { path = "../duniter-bc-reader" }
duniter-dbs = { path = "../duniter-dbs" }
duniter-dbs-write-ops = { path = "../duniter-dbs-write-ops" } duniter-dbs-write-ops = { path = "../duniter-dbs-write-ops" }
duniter-global = { path = "../duniter-global" }
duniter-gva = { path = "../modules/gva", optional = true } duniter-gva = { path = "../modules/gva", optional = true }
duniter-mempools = { path = "../duniter-mempools" } duniter-mempools = { path = "../duniter-mempools" }
duniter-module = { path = "../duniter-module" } duniter-module = { path = "../duniter-module" }
...@@ -21,7 +22,6 @@ flume = "0.10.0" ...@@ -21,7 +22,6 @@ flume = "0.10.0"
log = "0.4.11" log = "0.4.11"
paste = "1.0.2" paste = "1.0.2"
resiter = "0.4.0" resiter = "0.4.0"
tokio = { version = "1.2", features = ["io-util", "rt-multi-thread"] }
[features] [features]
default = ["gva"] default = ["gva"]
......
// Copyright (C) 2020 Éloïs SANCHEZ.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::*;
use dubp::wallet::prelude::SourceAmount;
use duniter_dbs::databases::bc_v2::BcV2DbReadable;
use duniter_global::{CurrentMeta, GlobalBackGroundTaskMsg};
/// Reads the head block meta (and the latest revalued UD) from the
/// blockchain database and, when the chain is non-empty, sends an
/// `InitCurrentMeta` message to seed the global cache.
///
/// Returns the current block meta, or `None` for an empty blockchain.
///
/// # Errors
///
/// Fails on a database read error or if the global task's channel is
/// disconnected.
pub(super) fn fill_and_get_current_meta<BcDb: BcV2DbReadable>(
    bc_db_ro: &BcDb,
    global_sender: &flume::Sender<GlobalBackGroundTaskMsg>,
) -> anyhow::Result<Option<BlockMetaV2>> {
    // Newest block meta, if any (iterate in reverse key order).
    let head = bc_db_ro
        .blocks_meta()
        .iter_rev(.., |it| it.values().next_res())?;
    if let Some(current_block_meta) = head {
        // Latest revalued UD amount; zero when no UD was ever created.
        let current_ud = bc_db_ro
            .uds_reval()
            .iter_rev(.., |it| it.values().map_ok(|v| v.0).next_res())?
            .unwrap_or(SourceAmount::ZERO);
        global_sender.send(GlobalBackGroundTaskMsg::InitCurrentMeta(CurrentMeta {
            current_ud,
            current_block_meta,
        }))?;
        Ok(Some(current_block_meta))
    } else {
        Ok(None)
    }
}
...@@ -25,6 +25,7 @@ impl DuniterServer { ...@@ -25,6 +25,7 @@ impl DuniterServer {
block.clone(), block.clone(),
self.current, self.current,
&self.dbs_pool, &self.dbs_pool,
&self.global_sender,
false, false,
)?); )?);
apply_block_modules(block, Arc::new(self.conf.clone()), &self.dbs_pool, None) apply_block_modules(block, Arc::new(self.conf.clone()), &self.dbs_pool, None)
...@@ -43,6 +44,7 @@ impl DuniterServer { ...@@ -43,6 +44,7 @@ impl DuniterServer {
self.current, self.current,
&self.dbs_pool, &self.dbs_pool,
blocks.clone(), blocks.clone(),
Some(&self.global_sender),
)?); )?);
apply_chunk_of_blocks_modules(blocks, Arc::new(self.conf.clone()), &self.dbs_pool, None) apply_chunk_of_blocks_modules(blocks, Arc::new(self.conf.clone()), &self.dbs_pool, None)
} }
......
...@@ -66,14 +66,9 @@ impl DuniterServer { ...@@ -66,14 +66,9 @@ impl DuniterServer {
.map_err(|e| e.into()) .map_err(|e| e.into())
} }
pub fn update_self_peer(&self, new_peer_card: PeerCardDbV1) { pub fn update_self_peer(&self, new_peer_card: PeerCardDbV1) {
self.dbs_pool self.global_sender
.execute(move |dbs| { .send(GlobalBackGroundTaskMsg::SetSelfPeerOld(new_peer_card))
dbs.cm_db .expect("global task disconnected");
.self_peer_old_write()
.upsert((), new_peer_card)
.expect("fail to write on memory db")
})
.expect("dbs pool disconnected")
} }
} }
......
...@@ -41,12 +41,18 @@ impl DuniterServer { ...@@ -41,12 +41,18 @@ impl DuniterServer {
.expect("dbs pool disconnected") .expect("dbs pool disconnected")
} }
pub fn get_self_endpoints(&self) -> anyhow::Result<Vec<Endpoint>> { pub fn get_self_endpoints(&self) -> anyhow::Result<Vec<Endpoint>> {
if let Some(self_peer) = self // Do not get rust endpoints on js tests
.dbs_pool if std::env::var_os("DUNITER_JS_TESTS") != Some("yes".into()) {
.execute(|dbs| dbs.cm_db.self_peer_old().get(&()))? let (sender, recv) = flume::bounded(1);
.context("fail to get self endpoints")? loop {
{ self.global_sender
Ok(self_peer.endpoints) .send(GlobalBackGroundTaskMsg::GetSelfEndpoints(sender.clone()))?;
if let Some(self_endpoints) = recv.recv()? {
break Ok(self_endpoints);
} else {
std::thread::sleep(std::time::Duration::from_millis(100));
}
}
} else { } else {
Ok(vec![]) Ok(vec![])
} }
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
unused_import_braces unused_import_braces
)] )]
mod fill_cm_db; mod fill_cm;
mod legacy; mod legacy;
pub use duniter_conf::{gva_conf::GvaConf, DuniterConf, DuniterMode}; pub use duniter_conf::{gva_conf::GvaConf, DuniterConf, DuniterMode};
...@@ -41,19 +41,16 @@ use dubp::{ ...@@ -41,19 +41,16 @@ use dubp::{
block::prelude::*, common::crypto::hashs::Hash, documents_parser::prelude::FromStringObject, block::prelude::*, common::crypto::hashs::Hash, documents_parser::prelude::FromStringObject,
}; };
use duniter_dbs::{ use duniter_dbs::{
databases::{ databases::{bc_v2::BcV2Db, txs_mp_v2::TxsMpV2DbReadable},
bc_v2::BcV2Db,
cm_v1::{CmV1DbReadable, CmV1DbWritable},
txs_mp_v2::TxsMpV2DbReadable,
},
kv_typed::prelude::*, kv_typed::prelude::*,
PendingTxDbV2, PubKeyKeyV2, PendingTxDbV2, PubKeyKeyV2,
}; };
use duniter_dbs::{prelude::*, BlockMetaV2, FileBackend}; use duniter_dbs::{prelude::*, BlockMetaV2, FileBackend};
use duniter_global::{tokio, GlobalBackGroundTaskMsg};
use duniter_mempools::{Mempools, TxMpError, TxsMempool}; use duniter_mempools::{Mempools, TxMpError, TxsMempool};
use duniter_module::{plug_duniter_modules, Endpoint, TxsHistoryForBma}; use duniter_module::{plug_duniter_modules, Endpoint, TxsHistoryForBma};
use fast_threadpool::ThreadPoolConfig; use fast_threadpool::ThreadPoolConfig;
use resiter::filter::Filter; use resiter::{filter::Filter, map::Map};
use std::{ use std::{
collections::BTreeMap, collections::BTreeMap,
path::{Path, PathBuf}, path::{Path, PathBuf},
...@@ -73,6 +70,7 @@ pub struct DuniterServer { ...@@ -73,6 +70,7 @@ pub struct DuniterServer {
conf: DuniterConf, conf: DuniterConf,
current: Option<BlockMetaV2>, current: Option<BlockMetaV2>,
dbs_pool: fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>, dbs_pool: fast_threadpool::ThreadPoolSyncHandler<SharedDbs<FileBackend>>,
global_sender: flume::Sender<GlobalBackGroundTaskMsg>,
pending_txs_subscriber: pending_txs_subscriber:
flume::Receiver<Arc<Events<duniter_dbs::databases::txs_mp_v2::TxsEvent>>>, flume::Receiver<Arc<Events<duniter_dbs::databases::txs_mp_v2::TxsEvent>>>,
profile_path_opt: Option<PathBuf>, profile_path_opt: Option<PathBuf>,
...@@ -98,10 +96,14 @@ impl DuniterServer { ...@@ -98,10 +96,14 @@ impl DuniterServer {
log::info!("open duniter databases..."); log::info!("open duniter databases...");
let (bc_db, shared_dbs) = duniter_dbs::open_dbs(profile_path_opt)?; let (bc_db, shared_dbs) = duniter_dbs::open_dbs(profile_path_opt)?;
shared_dbs.dunp_db.heads_old_write().clear()?; // Clear WS2Pv1 HEADs shared_dbs.dunp_db.heads_old_write().clear()?; // Clear WS2Pv1 HEADs
duniter_dbs_write_ops::cm::init(&bc_db, &shared_dbs.cm_db)?;
// Create channel with global async task
let (global_sender, global_recv) = flume::unbounded();
// Fill and get current meta
let current = fill_cm::fill_and_get_current_meta(&bc_db, &global_sender)?;
log::info!("Databases successfully opened."); log::info!("Databases successfully opened.");
let current = duniter_bc_reader::get_current_block_meta(&shared_dbs.cm_db)
.context("Fail to get current")?;
if let Some(current) = current { if let Some(current) = current {
log::info!("Current block: #{}-{}", current.number, current.hash); log::info!("Current block: #{}-{}", current.number, current.hash);
} else { } else {
...@@ -120,20 +122,19 @@ impl DuniterServer { ...@@ -120,20 +122,19 @@ impl DuniterServer {
let threadpool = let threadpool =
fast_threadpool::ThreadPool::start(ThreadPoolConfig::default(), shared_dbs.clone()); fast_threadpool::ThreadPool::start(ThreadPoolConfig::default(), shared_dbs.clone());
// Fill CmV1Db // Start async runtime
fill_cm_db::fill_current_meta_db(&shared_dbs)?;
if conf.gva.is_some() {
log::info!("start duniter modules...");
let runtime = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()?;
let conf_clone = conf.clone(); let conf_clone = conf.clone();
let profile_path_opt_clone = profile_path_opt.map(ToOwned::to_owned); let profile_path_opt_clone = profile_path_opt.map(ToOwned::to_owned);
let threadpool_async_handler = threadpool.async_handler(); let threadpool_async_handler = threadpool.async_handler();
std::thread::spawn(move || { std::thread::spawn(move || {
runtime duniter_global::get_async_runtime().block_on(async {
.block_on(start_duniter_modules( // Start global background task
duniter_global::start_global_background_task(global_recv).await;
// Start duniter modules
if conf_clone.gva.is_some() {
log::info!("start duniter modules...");
start_duniter_modules(
&conf_clone, &conf_clone,
currency, currency,
threadpool_async_handler, threadpool_async_handler,
...@@ -141,16 +142,19 @@ impl DuniterServer { ...@@ -141,16 +142,19 @@ impl DuniterServer {
duniter_mode, duniter_mode,
profile_path_opt_clone, profile_path_opt_clone,
software_version, software_version,
)) )
.context("Fail to start duniter modules") .await
}); .expect("Fail to start duniter modules");
} }
});
});
Ok(DuniterServer { Ok(DuniterServer {
bc_db, bc_db,
conf, conf,
current, current,
dbs_pool: threadpool.into_sync_handler(), dbs_pool: threadpool.into_sync_handler(),
global_sender,
pending_txs_subscriber, pending_txs_subscriber,
profile_path_opt: profile_path_opt.map(ToOwned::to_owned), profile_path_opt: profile_path_opt.map(ToOwned::to_owned),
shared_dbs, shared_dbs,
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment