Commit 194aeb9b authored by Hugo Trentesaux

cleanup

parent f145202e
@@ -7,18 +7,22 @@ Duniter Datapod is designed for offchain storage of Ğ1 data but does not depend
- a dev tool in Vue to understand the architecture, explore the data, and debug
![scheme](./doc/scheme.svg)
> scheme of data flow in Duniter Datapod
## Use
-To start a full indexer in production mode with docker, use the [`docker-compose.prod.yml`](./docker-compose.prod.yml) file:
+To start a full indexer in production mode with docker, use the [`docker-compose.prod.yml`](./docker-compose.prod.yml) and [`.env.prod.example`](./.env.prod.example) files:
```sh
-# start
+# edit env file
+vim .env
+# start services
docker compose up -d
```
This will pull preconfigured images for postgres/hasura, kubo and datapod. This should:
- connect to existing network
- start collecting from default IPNS
- index to database
@@ -75,25 +79,25 @@ More detail in the doc below.
## TODO
Bugs
-- [ ] initialize dd_keys for new node (→ bootstrap)
+- [x] initialize dd_keys for new node (→ bootstrap)
- [ ] fix merging blocked when inode unreachable, timeout seems ignored
-- [ ] fix pubsub abort triggered when a pubsub CID cannot be fetched (dirty workaround for now)
-- [ ] fix `UND_ERR_HEADERS_TIMEOUT` that happens very often when pinning 📌
+- [ ] fix pubsub socket closing after 5 minutes
+- [ ] fix `UND_ERR_HEADERS_TIMEOUT` that happens often for unknown reasons
Features
- [x] pubkey instead of ss58 address if we want data to be compatible across networks → ss58
-- [ ] add periodic sync with a list of trusted peers (IPNS entries)
+- [x] add periodic sync with a list of trusted peers (IPNS entries)
- [ ] split indexer vue app from backend indexer and improve node admin app
- [x] clarify the purpose of the main TAMT
- [ ] clarify the addressing format in the tables
- [ ] add domain specific index for profile for example
-- [ ] add a refcount to count the number of documents
+- [x] count the number of documents
- [ ] make the app build in prod mode
- [ ] allow connecting the app to a custom RPC endpoint
-- [ ] manage unpin requests when user/admin wants to delete data, see refcount
+- [ ] manage unpin requests when user/admin wants to delete data (requires ipfs refcount; see the sketch after this list)
- [x] document dev database change with tracking hasura console and squashing migrations
- [ ] add transaction comment (onchain + offchain to allow unified search)
- [ ] add version history to database (history of index request CIDs) -> not systematic
- [ ] update description of pubkey field to "ss58/address"
-- [ ] add ability to remove a node as well as its parent if it leaves it empty
- [ ] make base custom per tree (base 16, base 32)
+- [ ] add ability to remove a node (and its parents as well if they become empty)
- [ ]
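
The "refcount" items above suggest counting, per CID, how many documents reference it before unpinning. A minimal sketch of that idea, assuming an in-memory counter and the `pin.rm` call from kubo-rpc-client; every name here is illustrative, not the actual datapod API:

```ts
// Hypothetical refcount for pinned CIDs: unpin only when the last
// document referencing a CID is removed.
const refcount = new Map<string, number>()

function addRef(cid: string): void {
  refcount.set(cid, (refcount.get(cid) ?? 0) + 1)
}

// kubo.pin.rm exists in kubo-rpc-client; the bookkeeping around it is a sketch
async function removeRef(cid: string, kubo: { pin: { rm: (cid: string) => Promise<unknown> } }): Promise<void> {
  const remaining = (refcount.get(cid) ?? 1) - 1
  if (remaining <= 0) {
    refcount.delete(cid)
    await kubo.pin.rm(cid) // nothing references this CID any more
  } else {
    refcount.set(cid, remaining)
  }
}
```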
@@ -16,6 +16,8 @@ ipfs config Addresses.Swarm --json '[
"/ip6/::/tcp/4001",
"/ip4/0.0.0.0/tcp/4002/ws",
"/ip6/::/tcp/4002/ws",
"/ip4/0.0.0.0/udp/4001/webrtc-direct",
"/ip6/::/udp/4001/webrtc-direct",
"/ip4/0.0.0.0/udp/4001/quic-v1",
"/ip6/::/udp/4001/quic-v1",
"/ip4/0.0.0.0/udp/4001/quic-v1/webtransport",
@@ -90,4 +92,4 @@ ipfs config Gateway.ExposeRoutingAPI --json true
# use pubsub for IPNS records
# ipfs config --json Ipns.UsePubsub true
# republish records frequently
ipfs config --json Ipns.RepublishPeriod '"1min"'
\ No newline at end of file
ipfs config --json Ipns.RepublishPeriod '"5m"'
\ No newline at end of file
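
For context on `Ipns.RepublishPeriod`: followers periodically re-resolve a datapod's IPNS name to pick up the latest root CID, so the record must be republished before it expires. A hedged sketch of such a lookup with kubo-rpc-client (the RPC URL and peer id are placeholders, not values from this repo):

```ts
import { create } from 'kubo-rpc-client'

// placeholder endpoint; point this at your kubo RPC API
const kubo = create({ url: 'http://127.0.0.1:5001' })

// name.resolve yields the /ipfs/ path the IPNS name currently points to
for await (const path of kubo.name.resolve('/ipns/<peer-id>')) {
  console.log('current root:', path)
}
```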
@@ -143,6 +143,7 @@ const cesiumPlusProfileRaw: QueryBuilder = {
const fileCandidate = { content: new Uint8Array(buffer) }
// optimization: compute the hash locally without submitting it to kubo
// difficulty: check that the hash is the same
+// FIXME adding the avatar like this causes a lot of computation
cplus.avatar = (await kubo.add(fileCandidate)).cid
}
return cplus
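
The "compute the hash locally" optimization mentioned in the comments might look like the sketch below, using `multiformats` (an assumed dependency here). It only yields the same CID as `kubo.add` when the add uses CIDv1 with raw leaves and the content fits in a single block, which is exactly the "check that the hash is the same" difficulty noted above:

```ts
import { CID } from 'multiformats/cid'
import * as raw from 'multiformats/codecs/raw'
import { sha256 } from 'multiformats/hashes/sha2'

// Sketch: local CIDv1 (raw codec). Matches kubo.add output only for
// single-block, raw-leaves adds; larger files are chunked into a DAG.
async function localRawCid(bytes: Uint8Array): Promise<CID> {
  const digest = await sha256.digest(bytes)
  return CID.create(1, raw.code, digest)
}
```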
@@ -192,7 +192,7 @@ export async function computeDiff(fromCID: CID, toCID: CID): Promise<void> {
events.emit(evtype.triggerCollect)
// This is a hack to limit ingestion of new data and leave time to process everything
// for 100 000 documents with a batch size of 1000 and 3 seconds, it adds 5 minutes overall
-await setTimeout(3000) // 3 sec
+// await setTimeout(3000) // 3 sec
}
}
events.emit(evtype.triggerCollect)
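
The disabled throttle depends on the promise-based timer. A sketch of the arithmetic from the comment, assuming `setTimeout` comes from `node:timers/promises` and `processBatch` stands in for the real batch indexing:

```ts
import { setTimeout } from 'node:timers/promises'

const totalDocs = 100_000
const batchSize = 1_000

async function processBatch(i: number): Promise<void> {
  // hypothetical stand-in for indexing one batch of documents
}

// 100 batches × 3 s ≈ 5 extra minutes overall, as the comment says
for (let i = 0; i < totalDocs / batchSize; i++) {
  await processBatch(i)
  await setTimeout(3000)
}
```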
@@ -39,8 +39,11 @@ export async function* getDiff(cid1: CID, cid2: CID): AsyncIterable<CID[]> {
if (leaf1.leaf && leaf2.leaf) {
const [added1, added2] = compareLeafs('', leaf1, leaf2, [], [])
// only look on what was added in cid2
-if (added1.length != 0) console.log('ignoring missing index request ' + added1)
-else yield added2.map(([_k, v]) => v)
+if (added1.length != 0) {
+// console.debug('ignoring missing index request ' + added1)
+} else {
+yield added2.map(([_k, v]) => v)
+}
} else if (inode1.children && inode2.children) {
// do the inode comparison
yield* getDiffInodes(inode1, inode2)
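
To make the yield shape of `getDiff` concrete, a hypothetical consumer: each iteration produces a batch of CIDs present in `cid2` but absent from `cid1` (the two roots below are placeholders):

```ts
// oldRoot/newRoot are placeholder CIDs, e.g. obtained from IPNS resolution
for await (const addedBatch of getDiff(oldRoot, newRoot)) {
  for (const cid of addedBatch) {
    console.log('new index request:', cid.toString())
  }
}
```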
@@ -70,7 +73,7 @@ async function* getDiffInodes(inode1: IndexInode, inode2: IndexInode): AsyncIter
}
if (ri == null) {
// left is not null and was added, ignore
-console.log(`ignoring missing data at ${ctx}${li[0]}: ${li[1]}`)
+// console.debug(`ignoring missing data at ${ctx}${li[0]}: ${li[1]}`)
continue
}
@@ -115,7 +118,7 @@ async function* getDiffInodes(inode1: IndexInode, inode2: IndexInode): AsyncIter
// content is then completely different
// ignore what's new in left as removed
// only yield what's new in right
-console.log('ignoring removed value ' + lic)
+// console.log('ignoring removed value ' + lic)
yield* getAll(ric)
continue
}