Commit 1e90d113 authored by Éloïs
Browse files

[style] comply to clippy 1.49

parent d1e1c139
......@@ -55,16 +55,15 @@ pub(crate) fn export_bc<B: Backend>(
let (s, r) = flume::unbounded();
let reader_handle = std::thread::spawn(move || {
bc_v1.main_blocks().iter(.., |it| {
it.values()
.map(|block_res| s.send(block_res).map_err(|_| anyhow!("fail to send")))
.collect::<anyhow::Result<()>>()
it.values().try_for_each(|block_res| {
s.send(block_res).map_err(|_| anyhow!("fail to send"))
})
})
});
let (s2, r2) = flume::unbounded();
let jsonifier_handle = std::thread::spawn(move || {
r.iter()
.map(|block_res| {
r.iter().try_for_each(|block_res| {
let json_block_res = match block_res {
Ok(block) => {
serde_json::to_value(&block).map_err(|e| KvError::DeserError(e.into()))
......@@ -73,7 +72,6 @@ pub(crate) fn export_bc<B: Backend>(
};
s2.send(json_block_res).map_err(|_| anyhow!("fail to send"))
})
.collect::<anyhow::Result<()>>()
});
let threadpool = ThreadPool::start(ThreadPoolConfig::default(), ()).into_sync_handler();
......
......@@ -49,7 +49,6 @@ use std::{
collections::{HashMap, HashSet},
fs::File,
io::{stdin, Write},
iter::FromIterator,
time::Instant,
};
use structopt::StructOpt;
......
......@@ -65,9 +65,9 @@ fn migrate_inner(
let (s, r) = flume::unbounded();
let reader_handle = std::thread::spawn(move || {
duniter_js_db.main_blocks().iter(.., |it| {
it.values()
.map(|block_res| s.send(block_res).map_err(|_| anyhow!("fail to send")))
.collect::<anyhow::Result<()>>()
it.values().try_for_each(|block_res| {
s.send(block_res).map_err(|_| anyhow!("fail to send"))
})
})
});
let (s2, r2) = flume::unbounded();
......
......@@ -42,7 +42,7 @@ pub fn print_found_data<W: Write>(
}
let only_properties_set = if !only_properties.is_empty() {
HashSet::from_iter(only_properties.into_iter())
only_properties.into_iter().collect()
} else {
HashSet::with_capacity(0)
};
......
......@@ -474,14 +474,13 @@ mod tests {
use duniter_dbs::smallvec::smallvec as svec;
use duniter_dbs::{databases::bc_v2::BcV2DbWritable, SourceAmountValV2, UdIdV2};
use duniter_gva_db::GvaV1DbWritable;
use std::iter::FromIterator;
#[test]
fn test_filter_blocks_numbers() -> KvResult<()> {
let idty = GvaIdtyDbV1 {
is_member: true,
joins: svec![BlockNumber(26), BlockNumber(51)],
leaves: BTreeSet::from_iter([BlockNumber(32)].iter().copied()),
leaves: [BlockNumber(32)].iter().copied().collect(),
first_ud: Some(BlockNumber(29)),
};
let blocks_with_ud = vec![
......@@ -530,7 +529,7 @@ mod tests {
let idty = GvaIdtyDbV1 {
is_member: true,
joins: svec![BlockNumber(26), BlockNumber(51)],
leaves: BTreeSet::from_iter([BlockNumber(32)].iter().copied()),
leaves: [BlockNumber(32)].iter().copied().collect(),
first_ud: Some(BlockNumber(29)),
};
......
......@@ -18,7 +18,6 @@ use async_mutex::Mutex;
use duniter_dbs::kv_typed::prelude::Arc;
use std::{
collections::{HashMap, HashSet},
iter::FromIterator,
net::IpAddr,
time::Duration,
time::Instant,
......@@ -49,7 +48,7 @@ impl From<&GvaConf> for AntiSpam {
ban: HashMap::with_capacity(10),
ips_time: HashMap::with_capacity(10),
})),
whitelist: HashSet::from_iter(conf.get_whitelist().iter().copied()),
whitelist: conf.get_whitelist().iter().copied().collect(),
}
}
}
......
......@@ -47,7 +47,7 @@ where
{
fn as_bytes<D, F: FnMut(&[u8]) -> D>(&self, mut f: F) -> D {
use zerocopy::AsBytes as _;
f((&SmallVec::<[T; 32]>::from_iter(self.iter().copied())[..]).as_bytes())
f((&self.iter().copied().collect::<SmallVec<[T; 32]>>()[..]).as_bytes())
}
}
......@@ -57,7 +57,7 @@ where
{
fn as_bytes<D, F: FnMut(&[u8]) -> D>(&self, mut f: F) -> D {
use zerocopy::AsBytes as _;
f((&SmallVec::<[T; 32]>::from_iter(self.iter().copied())[..]).as_bytes())
f((&self.iter().copied().collect::<SmallVec<[T; 32]>>()[..]).as_bytes())
}
}
......
......@@ -100,7 +100,7 @@ where
let layout_verified = zerocopy::LayoutVerified::<_, [T]>::new_slice(bytes)
.ok_or(LayoutVerifiedErr(stringify!(BTreeSet<T>)))?;
let slice = layout_verified.into_slice();
Ok(BTreeSet::from_iter(slice.iter().copied()))
Ok(slice.iter().copied().collect())
}
}
......@@ -114,7 +114,7 @@ where
let layout_verified = zerocopy::LayoutVerified::<_, [T]>::new_slice(bytes)
.ok_or(LayoutVerifiedErr(stringify!(HashSet<T>)))?;
let slice = layout_verified.into_slice();
Ok(HashSet::from_iter(slice.iter().copied()))
Ok(slice.iter().copied().collect())
}
}
......
......@@ -114,7 +114,6 @@ pub(crate) use std::{
convert::TryInto,
error::Error,
fmt::{Debug, Display},
iter::FromIterator,
marker::PhantomData,
ops::{Bound, RangeBounds},
str::FromStr,
......
......@@ -14,6 +14,7 @@ db_schema!(
);
#[test]
#[allow(clippy::eq_op)]
fn test_macro_db() {
assert_eq!(Col1Event::RemoveAll, Col1Event::RemoveAll);
......@@ -118,9 +119,8 @@ fn test_db<B: Backend>(db: &TestV1Db<B>) -> KvResult<()> {
})?;
// Test get_ref_slice
use std::iter::FromIterator as _;
db.col4_write()
.upsert(4, BTreeSet::from_iter((&[3, 2, 4, 1]).iter().copied()))?;
.upsert(4, (&[3, 2, 4, 1]).iter().copied().collect())?;
db.col4().get_ref_slice(&4, |numbers| {
assert_eq!(numbers, &[1, 2, 3, 4]);
Ok(())
......@@ -195,7 +195,7 @@ fn test_db<B: Backend>(db: &TestV1Db<B>) -> KvResult<()> {
Ok::<(), KvError>(())
})?;
c4.upsert(4, BTreeSet::from_iter((&[7, 8, 6, 5]).iter().copied()));
c4.upsert(4, (&[7, 8, 6, 5]).iter().copied().collect());
Ok(())
});
tres?;
......
Markdown is supported
Attach a file by drag & drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment