diff --git a/README.md b/README.md
index bbdf2433f8f48829ec6f5a9d28f55d90b5880ea5..320b02a91a72816365f11a7bc335a17ed7860a1d 100644
--- a/README.md
+++ b/README.md
@@ -7,18 +7,22 @@ Duniter Datapod is designed for offchain storage of Ğ1 data but does not depend
 - a dev tool in Vue to understand the architecture, explore the data, and debug
 
 ![scheme](./doc/scheme.svg)
-> sheme of data flow in Duniter Datapod 
+
+> scheme of data flow in Duniter Datapod
 
 ## Use
 
-To start a full indexer in production mode with docker, use the [`docker-compose.prod.yml`](./docker-compose.prod.yml) file:
+To start a full indexer in production mode with docker, use the [`docker-compose.prod.yml`](./docker-compose.prod.yml) and [`.env.prod.example`](./.env.prod.example) files:
 
 ```sh
-# start
+# edit env file
+vim .env
+# start services
 docker compose up -d
 ```
 
 This will pull preconfigured images for postgres/hasura, kubo and datapod. This should:
+
 - connect to existing network
 - start collecting from default IPNS
 - index to database
@@ -75,25 +79,25 @@ More detail in the doc below.
 ## TODO
 
 Bugs
-- [ ] initialize dd_keys for new node (→ bootstrap)
+
+- [x] initialize dd_keys for new node (→ bootstrap)
 - [ ] fix merging blocked when inode unreachable, timeout seems ignored
-- [ ] fix pubsub cid can not be fetched triggers pubsub abort (dirty workaround for know)
-- [ ] fix `UND_ERR_HEADERS_TIMEOUT` that happen very often when pinning 📌
+- [ ] fix pubsub socket closing after 5 minutes
+- [ ] fix `UND_ERR_HEADERS_TIMEOUT` that happens often for unknown reasons
+
 Features
+
 - [x] pubkey instead of ss58 address if we want data to be compatible across networks → ss58
-- [ ] add periodic sync with a list of trusted peers (IPNS entries)
+- [x] add periodic sync with a list of trusted peers (IPNS entries)
 - [ ] split indexer vue app from backend indexer and improve node admin app
-    - [x] clarify the purpose of the the main TAMT
-    - [ ] clarify the adressing format in the tables
-    - [ ] add domain specific index for profile for example
-    - [ ] add a refcount to count the number of documents
-    - [ ] make the app build in prod mode
-    - [ ] allow connecting the app to a custom RPC endpoint
-- [ ] manage unpin requests when user/admin wants to delete data, see refcount
+  - [x] clarify the purpose of the main TAMT
+  - [ ] clarify the addressing format in the tables
+  - [x] count the number of documents
+  - [ ] make the app build in prod mode
+  - [ ] allow connecting the app to a custom RPC endpoint
+- [ ] manage unpin requests when user/admin wants to delete data (requires ipfs refcount)
 - [x] document dev database change with tracking hasura console and squashing migrations
 - [ ] add transaction comment (onchain + offchain to allow unified search)
-- [ ] add version history to database (history of index request CIDs) -> not systematic
 - [ ] update description of pubkey field to "ss58/address"
-- [ ] add ability to remove a node as well as its parent if it leaves it empty
-- [ ] make base custom per tree (base 16, base 32)
-- [ ] 
+- [ ] add ability to remove a node (and its parents as well if they become empty)
+- [ ]
diff --git a/scripts/configure.sh b/scripts/configure.sh
index 6e26a9e07c42fb7fcdd6d6efef5f919a93ca2adc..642dc5d2e88cc89b678f022970680bfef80e1bb9 100644
--- a/scripts/configure.sh
+++ b/scripts/configure.sh
@@ -16,6 +16,8 @@ ipfs config Addresses.Swarm --json '[
   "/ip6/::/tcp/4001",
   "/ip4/0.0.0.0/tcp/4002/ws",
   "/ip6/::/tcp/4002/ws",
+  "/ip4/0.0.0.0/udp/4001/webrtc-direct",
+  "/ip6/::/udp/4001/webrtc-direct",
   "/ip4/0.0.0.0/udp/4001/quic-v1",
   "/ip6/::/udp/4001/quic-v1",
   "/ip4/0.0.0.0/udp/4001/quic-v1/webtransport",
@@ -90,4 +92,4 @@ ipfs config Gateway.ExposeRoutingAPI --json true
 # use pubsub for IPNS records
 # ipfs config --json Ipns.UsePubsub true
 # republish records frequently
-ipfs config --json Ipns.RepublishPeriod '"1min"'
\ No newline at end of file
+ipfs config --json Ipns.RepublishPeriod '"5m"'
\ No newline at end of file
diff --git a/src/indexer/database.ts b/src/indexer/database.ts
index e2a72e9a5059449ad7e65e862e5149a51e78e1d1..517a78c1b47ae9180e46c613d51ca64a7df54172 100644
--- a/src/indexer/database.ts
+++ b/src/indexer/database.ts
@@ -143,6 +143,7 @@ const cesiumPlusProfileRaw: QueryBuilder = {
       const fileCandidate = { content: new Uint8Array(buffer) }
       // optimization: compute the hash locally without submitting it to kubo
       // difficulty: check that the hash is the same
+      // FIXME adding the avatar like this causes a lot of computation
       cplus.avatar = (await kubo.add(fileCandidate)).cid
     }
     return cplus
diff --git a/src/indexer/handlers.ts b/src/indexer/handlers.ts
index 05e0933ad9bf3ab2e5f2856129b06a621aab54e9..ffe2611c199375dbaf934a8bac38d256b6e2d0c2 100644
--- a/src/indexer/handlers.ts
+++ b/src/indexer/handlers.ts
@@ -192,7 +192,7 @@ export async function computeDiff(fromCID: CID, toCID: CID): Promise<void> {
           events.emit(evtype.triggerCollect)
           // This is a hack to limit injestion of new data and let time to process all
           // for 100 000 documents with a batch size of 1000 and 3 seconds, it is adding 5 minutes overall
-          await setTimeout(3000) // 3 sec
+          // await setTimeout(3000) // 3 sec
         }
       }
       events.emit(evtype.triggerCollect)
diff --git a/src/interface.ts b/src/interface.ts
index 556ee3ea2757c0e9abad1d3079e6bb0aa16ee009..d8097f17124fb71f8eb80d2bd613d83008df5eb1 100644
--- a/src/interface.ts
+++ b/src/interface.ts
@@ -39,8 +39,11 @@ export async function* getDiff(cid1: CID, cid2: CID): AsyncIterable<CID[]> {
   if (leaf1.leaf && leaf2.leaf) {
     const [added1, added2] = compareLeafs('', leaf1, leaf2, [], [])
     // only look on what was added in cid2
-    if (added1.length != 0) console.log('ignoring missing index request ' + added1)
-    else yield added2.map(([_k, v]) => v)
+    if (added1.length != 0) {
+      // console.debug('ignoring missing index request ' + added1)
+    } else {
+      yield added2.map(([_k, v]) => v)
+    }
   } else if (inode1.children && inode2.children) {
     // do the inode comparison
     yield* getDiffInodes(inode1, inode2)
@@ -70,7 +73,7 @@ async function* getDiffInodes(inode1: IndexInode, inode2: IndexInode): AsyncIter
     }
     if (ri == null) {
       // left is not null and was added, ignore
-      console.log(`ignoring missing data at ${ctx}${li[0]}: ${li[1]}`)
+      // console.debug(`ignoring missing data at ${ctx}${li[0]}: ${li[1]}`)
       continue
     }
 
@@ -115,7 +118,7 @@ async function* getDiffInodes(inode1: IndexInode, inode2: IndexInode): AsyncIter
     // content is then completely different
     // ignore what's new in left as removed
     // only yield what's new in right
-    console.log('ignoring removed value ' + lic)
+    // console.log('ignoring removed value ' + lic)
     yield* getAll(ric)
     continue
   }