Skip to content
Snippets Groups Projects
Commit fd9f6806 authored by Hugo Trentesaux's avatar Hugo Trentesaux
Browse files

add extra kubo instance

to expose RPC without risk on main kubo instance
parent 04352af7
No related branches found
No related tags found
No related merge requests found
Showing
with 211 additions and 114 deletions
...@@ -10,6 +10,7 @@ HASURA_LISTEN_PORT=8888 ...@@ -10,6 +10,7 @@ HASURA_LISTEN_PORT=8888
HASURA_GRAPHQL_ADMIN_SECRET=hasura_admin_secret HASURA_GRAPHQL_ADMIN_SECRET=hasura_admin_secret
# kubo configuration # kubo configuration
KUBO_PORT=4001 # should be open to incoming connections
KUBO_DOMAIN=datapod.coinduf.eu # domain (used in p2p peering) KUBO_DOMAIN=datapod.coinduf.eu # domain (used in p2p peering)
KUBO_GATEWAY_PORT=8080 # listen port of ipfs http gateway KUBO_GATEWAY_PORT=8080 # listen port of ipfs http gateway
KUBO_GATEWAY_DOMAIN=gateway.datapod.coinduf.eu # domain for kubo reverse proxy ipfs http gateway reverse proxy KUBO_GATEWAY_DOMAIN=gateway.datapod.coinduf.eu # domain for kubo reverse proxy ipfs http gateway reverse proxy
......
# Datapod kubo node image: stock kubo (version pinned) plus one init script.
# The kubo image runs every /container-init.d/*.sh before starting the daemon.
FROM ipfs/kubo:v0.30.0
# NOTE(review): monolithic configure script — superseded by the split
# configure-*.sh variant in the other Dockerfile; confirm which is current.
COPY ./scripts/configure.sh /container-init.d/001-configure.sh
# exec form keeps the daemon as PID 1; pubsub flag enables gossip features
CMD ["daemon", "--enable-pubsub-experiment"]
# build / publish commands:
# docker buildx build -f Dockerfile.Kubo . -t h30x/datapod-kubo
# docker image push h30x/datapod-kubo
\ No newline at end of file
...@@ -12,7 +12,7 @@ Duniter Datapod is designed for offchain storage of Ğ1 data but does not depend ...@@ -12,7 +12,7 @@ Duniter Datapod is designed for offchain storage of Ğ1 data but does not depend
## Use ## Use
To start a full indexer in production mode with docker, use the [`docker-compose.prod.yml`](./docker-compose.prod.yml) and [`.env.prod.example`](./.env.prod.example) files: To start a full indexer in production mode with docker, use the [`docker-compose.prod.yml`](./docker/docker-compose.prod.yml) and [`.env.prod.example`](./.env.prod.example) files:
```sh ```sh
# edit env file # edit env file
...@@ -43,9 +43,9 @@ pnpm install ...@@ -43,9 +43,9 @@ pnpm install
Start a kubo node with pubsub and postgres/hasura Start a kubo node with pubsub and postgres/hasura
```sh ```sh
# start kubo node TODO put this in the docker # start kubo node (easier to have it on system instead of docker)
ipfs daemon --enable-pubsub-experiment ipfs daemon --enable-pubsub-experiment
# start postgres / hasura # start postgres / hasura from dev docker compose file
docker compose up -d docker compose up -d
``` ```
......
# replace the following: # replace the following:
# RPC_KUBO_DOMAIN = the name under which expose your RPC API # RPC_KUBO_DOMAIN = the name under which expose your RPC API
# RPC_KUBO_PORT = the rpc api port (usually 5001) # RPC_KUBO_PORT = the rpc api port (usually 5001 or 6001)
server { server {
listen 80 ; listen 80 ;
...@@ -26,6 +26,10 @@ server { ...@@ -26,6 +26,10 @@ server {
location / { location / {
return 400; return 400;
} }
location /api/v0/file/ {
proxy_pass http://localhost:RPC_KUBO_PORT;
proxy_set_header Host $host;
}
location /api/v0/dag/ { location /api/v0/dag/ {
proxy_pass http://localhost:RPC_KUBO_PORT; proxy_pass http://localhost:RPC_KUBO_PORT;
proxy_set_header Host $host; proxy_set_header Host $host;
......
File moved
File moved
# kubo config suited for datapod use
# TODO split gateway in another instance for performance and safeness and separation of concerns
# The kubo image runs every /container-init.d/*.sh before starting the daemon;
# the numeric prefixes fix the execution order (keys first, extra tuning last).
FROM ipfs/kubo:v0.30.0
COPY ./scripts/configure-keys.sh /container-init.d/001-configure.sh
COPY ./scripts/configure-addresses.sh /container-init.d/002-configure.sh
COPY ./scripts/configure-bootstrap.sh /container-init.d/003-configure.sh
COPY ./scripts/configure-peering.sh /container-init.d/004-configure.sh
COPY ./scripts/configure-rpc.sh /container-init.d/005-configure.sh
COPY ./scripts/configure-gateway.sh /container-init.d/006-configure.sh
COPY ./scripts/configure-extra.sh /container-init.d/007-configure.sh
# exec form keeps the daemon as PID 1; pubsub flag enables gossip features
CMD ["daemon", "--enable-pubsub-experiment"]
\ No newline at end of file
# kubo config suited for unsafe remote RPC instance
# (a separate node so the RPC API can be exposed without putting the main
# kubo instance at risk — see commit message)
FROM ipfs/kubo:v0.30.0
# init scripts run in lexical order before the daemon starts
COPY ./scripts/configure-addresses.sh /container-init.d/001-configure.sh
COPY ./scripts/configure-bootstrap.sh /container-init.d/002-configure.sh
COPY ./scripts/configure-peering.sh /container-init.d/003-configure.sh
COPY ./scripts/configure-rpc-public.sh /container-init.d/004-configure.sh
# NOTE(review): no --enable-pubsub-experiment here, unlike the main instance —
# presumably intentional for an RPC-only node; confirm.
CMD ["daemon"]
\ No newline at end of file
...@@ -2,10 +2,10 @@ ...@@ -2,10 +2,10 @@
set -e set -e
# Get the version number from package.json # Get the version number from package.json
version_tag=$(grep -oP '"version": "\K[^"]+' package.json) version_tag=$(grep -oP '"version": "\K[^"]+' ./package.json)
# # --- datapod # # --- datapod
# docker buildx build -f Dockerfile -t duniter-datapod . # docker buildx build -f ./docker/Dockerfile -t duniter-datapod .
# # Tag with version and 'latest' # # Tag with version and 'latest'
# docker image tag duniter-datapod h30x/duniter-datapod:$version_tag # docker image tag duniter-datapod h30x/duniter-datapod:$version_tag
...@@ -15,19 +15,30 @@ version_tag=$(grep -oP '"version": "\K[^"]+' package.json) ...@@ -15,19 +15,30 @@ version_tag=$(grep -oP '"version": "\K[^"]+' package.json)
# docker image push h30x/duniter-datapod:$version_tag # docker image push h30x/duniter-datapod:$version_tag
# docker image push h30x/duniter-datapod:latest # docker image push h30x/duniter-datapod:latest
# --- kubo # # --- kubo
docker buildx build -f Dockerfile.Kubo -t datapod-kubo . # docker buildx build -f ./docker/Dockerfile.Kubo -t datapod-kubo .
# # Tag with version and 'latest'
# docker image tag datapod-kubo h30x/datapod-kubo:$version_tag
# docker image tag datapod-kubo h30x/datapod-kubo:latest
# # Push both
# docker image push h30x/datapod-kubo:$version_tag
# docker image push h30x/datapod-kubo:latest
# --- kubo-rpc
docker buildx build -f ./docker/Dockerfile.KuboRpc -t datapod-kubo-rpc .
# Tag with version and 'latest' # Tag with version and 'latest'
docker image tag datapod-kubo h30x/datapod-kubo:$version_tag docker image tag datapod-kubo-rpc h30x/datapod-kubo-rpc:$version_tag
docker image tag datapod-kubo h30x/datapod-kubo:latest docker image tag datapod-kubo-rpc h30x/datapod-kubo-rpc:latest
# Push both # Push both
docker image push h30x/datapod-kubo:$version_tag docker image push h30x/datapod-kubo-rpc:$version_tag
docker image push h30x/datapod-kubo:latest docker image push h30x/datapod-kubo-rpc:latest
# # --- hasura # # --- hasura
# docker buildx build -f Dockerfile.Hasura -t datapod-hasura . # docker buildx build -f ./docker/Dockerfile.Hasura -t datapod-hasura .
# # Tag with version and 'latest' # # Tag with version and 'latest'
# docker image tag datapod-hasura h30x/datapod-hasura:$version_tag # docker image tag datapod-hasura h30x/datapod-hasura:$version_tag
......
...@@ -37,8 +37,8 @@ services: ...@@ -37,8 +37,8 @@ services:
image: h30x/datapod-kubo image: h30x/datapod-kubo
ports: ports:
# p2p port public (tcp, udp, webtransport, webrtc) # p2p port public (tcp, udp, webtransport, webrtc)
- 4001:4001/tcp - ${KUBO_PORT}:4001/tcp
- 4001:4001/udp - ${KUBO_PORT}:4001/udp
# public gateway # public gateway
- 127.0.0.1:${KUBO_GATEWAY_PORT}:8080 - 127.0.0.1:${KUBO_GATEWAY_PORT}:8080
- '[::1]:${KUBO_GATEWAY_PORT}:8080' - '[::1]:${KUBO_GATEWAY_PORT}:8080'
...@@ -50,10 +50,27 @@ services: ...@@ -50,10 +50,27 @@ services:
# - kubo_init:/container-init.d # - kubo_init:/container-init.d
environment: environment:
KUBO_DOMAIN: ${KUBO_DOMAIN} KUBO_DOMAIN: ${KUBO_DOMAIN}
KUBO_PORT: ${KUBO_PORT}
KUBO_GATEWAY_DOMAIN: ${KUBO_GATEWAY_DOMAIN} KUBO_GATEWAY_DOMAIN: ${KUBO_GATEWAY_DOMAIN}
KUBO_GATEWAY_SUBDOMAIN: ${KUBO_GATEWAY_SUBDOMAIN} KUBO_GATEWAY_SUBDOMAIN: ${KUBO_GATEWAY_SUBDOMAIN}
restart: always restart: always
# ------
kubo-rpc:
image: h30x/datapod-kubo-rpc
ports:
# p2p port public (tcp, udp, webtransport, webrtc)
- 4002:4001/tcp
- 4002:4001/udp
# expose RPC to partial reverse proxy
- 127.0.0.1:6001:5001
volumes:
- kubo-rpc_data:/data/ipfs
environment:
KUBO_DOMAIN: ${KUBO_DOMAIN}
KUBO_PORT: 4002
restart: always
# ------ # ------
# optional kubo pubsub to see what the node receives on pubsub # optional kubo pubsub to see what the node receives on pubsub
pubsub: pubsub:
...@@ -86,4 +103,5 @@ services: ...@@ -86,4 +103,5 @@ services:
volumes: volumes:
db_data: db_data:
kubo_data: kubo_data:
kubo-rpc_data:
kubo_init: kubo_init:
J'arrive plus à réfléchir alors j'écris :
- c'est facile pour un noeud ipfs de récupérer la liste de ses clés (paires nom/clé)
- quand on bootstrap un datapod, il doit récupérer plusieurs objets
- quand on synchronise un datapod, il doit pouvoir récupérer plusieurs objets manquants
- on aimerait avoir à transmettre le minimum d'info pour la configuration du noeud
- avoir un seul lien à ajouter dans l'interface d'admin en plus des hardcodés serait l'idéal
- on dispose déjà d'une liste hardcodée qu'est la liste de pair
- dedans il y a les clés publiques des pairs (12D3KooWF44SaSomGuUSNycgpkRwQcxcsMYeNbtn6XCHPR2ojodv par ex)
- ces clés peuvent être résolues par IPNS
\ No newline at end of file
#!/bin/sh
# Kubo init script: set swarm listen addresses and the multiaddrs announced
# to peers. Runs from /container-init.d before the daemon starts.
set -ex
# --- addresses ---
# enable p2p, quic, webtransport, webrtc
# ipfs config Swarm.Transports.Network.Websocket --json true
# internal port is always 4001
ipfs config Addresses.Swarm --json '[
"/ip4/0.0.0.0/tcp/4001",
"/ip6/::/tcp/4001",
"/ip4/0.0.0.0/udp/4001/quic-v1",
"/ip6/::/udp/4001/quic-v1",
"/ip4/0.0.0.0/udp/4001/quic-v1/webtransport",
"/ip6/::/udp/4001/quic-v1/webtransport",
"/ip4/0.0.0.0/udp/4001/webrtc-direct",
"/ip6/::/udp/4001/webrtc-direct"
]'
# configure the addresses to announce
# KUBO_PORT is external port mapped in docker compose to 4001
# NOTE(review): if KUBO_DOMAIN or KUBO_PORT is unset the announced multiaddrs
# are malformed — confirm compose always provides both env vars.
ipfs config Addresses.Announce --json "[
\"/dns/$KUBO_DOMAIN/tcp/$KUBO_PORT\",
\"/dns/$KUBO_DOMAIN/udp/$KUBO_PORT/quic-v1\",
\"/dns/$KUBO_DOMAIN/udp/$KUBO_PORT/quic-v1/webtransport\",
\"/dns/$KUBO_DOMAIN/udp/$KUBO_PORT/webrtc-direct\"
]"
#!/bin/sh
# Kubo init script: replace the default IPFS bootstrap list with the
# datapod peers so the node joins the datapod swarm directly.
set -ex
# --- bootstrap ---
# remove default bootstrap nodes
ipfs bootstrap rm all
# add custom bootstrap (hugo, poka, aya)
ipfs config Bootstrap --json '[
"/dns/datapod.coinduf.eu/tcp/4001/p2p/12D3KooWFp4JsoDo5FX8CFLtyJjaWWRZ8q3gr8uT2s9To2GYzRNA",
"/dns/gateway.datapod.ipfs.p2p.legal/tcp/4001/p2p/12D3KooWEaBZ3JfeXJayneVdpc71iUYWzeykGxzEq4BFWpPTv5wn",
"/dns/ipfs.asycn.io/tcp/4001/p2p/12D3KooWJnzYzJBtruXZwUQJriF1ePtDQCUQp4aNBV5FjpYVdfhc",
"/dns/datapod.gyroi.de/tcp/4001/p2p/12D3KooWAHf2cyDysXXP1xaAt75dNviRhF2T9QfnQGGZ6kSXvMwK"
]'
\ No newline at end of file
#!/bin/sh
# Kubo init script: IPNS tuning.
set -ex
# --- IPNS ---
# use pubsub for IPNS records
# ipfs config --json Ipns.UsePubsub true
# republish records frequently
# (the value is a JSON string, hence the nested quotes)
ipfs config --json Ipns.RepublishPeriod '"5m"'
\ No newline at end of file
#!/bin/sh
# Kubo init script: configure the public HTTP gateway(s) of the datapod node.
set -ex
# --- gateway ---
# prevent gateway from fetching foreign data
# ipfs config Gateway.NoFetch --json true
# ipfs config Gateway.NoFetch --json false
# public gateway without subdomain (no wildcard)
# enables /ipfs and /routing (delegated routing)
# public gateway with subdomain (needs wildcard)
# NOTE(review): assumes KUBO_GATEWAY_DOMAIN and KUBO_GATEWAY_SUBDOMAIN are set;
# an empty value would register a "" hostname — confirm compose always sets both.
ipfs config Gateway.PublicGateways --json "{
\"$KUBO_GATEWAY_DOMAIN\": { \"UseSubdomains\": false, \"Paths\": [\"/ipfs\", \"/routing\"] },
\"$KUBO_GATEWAY_SUBDOMAIN\": { \"UseSubdomains\": true, \"Paths\": [\"/ipfs\", \"/ipns\"] }
}"
# serve the delegated routing API from the gateway
ipfs config Gateway.ExposeRoutingAPI --json true
# only reprovide pinned data
# ipfs config Reprovider.Strategy "pinned"
# ipfs config Reprovider.Strategy --json null
#!/bin/sh
set -ex
# --- keys ---
# Create the IPNS keys used by the datapod indexes. `ipfs key gen` fails when
# the key already exists, so `|| true` keeps the script idempotent across runs.
for key_name in dd_root dd_tamt dd_tamt_hist dd_profiles; do
  ipfs key gen "$key_name" || true
done
#!/bin/sh
# Kubo init script: pin the datapod peers via Peering (kept connected even
# under connection-manager pressure) and force DHT routing.
set -ex
# --- peering ---
# add known peers (poka, hugo, aya)
ipfs config Peering.Peers --json '[
{
"ID": "12D3KooWEaBZ3JfeXJayneVdpc71iUYWzeykGxzEq4BFWpPTv5wn",
"Addrs": ["/dns/gateway.datapod.ipfs.p2p.legal/tcp/4001"]
},
{
"ID": "12D3KooWFp4JsoDo5FX8CFLtyJjaWWRZ8q3gr8uT2s9To2GYzRNA",
"Addrs": ["/dns/datapod.coinduf.eu/tcp/4001"]
},
{
"ID": "12D3KooWJnzYzJBtruXZwUQJriF1ePtDQCUQp4aNBV5FjpYVdfhc",
"Addrs": ["/dns/ipfs.asycn.io/tcp/4001"]
},
{
"ID": "12D3KooWAHf2cyDysXXP1xaAt75dNviRhF2T9QfnQGGZ6kSXvMwK",
"Addrs": ["/dns/datapod.gyroi.de/tcp/4001"]
}
]
'
# configure as a dht to avoid unnecessary http routing
ipfs config Routing.Type dht
#!/bin/sh
# Kubo init script: CORS headers for the RPC API.
# Allows browser apps served from these local origins to call the API.
set -ex
# --- rpc ---
# NOTE(review): 6001 matches the host port mapped to the kubo-rpc container's
# 5001 in docker compose — confirm which instance installs this script.
ipfs config API.HTTPHeaders.Access-Control-Allow-Origin --json '["http://127.0.0.1:5001","http://127.0.0.1:6001","http://127.0.0.1:6002"]'
ipfs config API.HTTPHeaders.Access-Control-Allow-Methods --json '["PUT", "POST"]'
#!/bin/sh
# Kubo init script: CORS headers for the RPC API, tuned for access through
# local SSH tunnels (ports 5001-5005).
set -ex
# --- rpc ---
# allow easy access through ssh tunnel on port 500x
# ssh -NL 5002:localhost:500x datapod
# ipfs --api=/ip4/127.0.0.1/tcp/500x
ipfs config API.HTTPHeaders.Access-Control-Allow-Origin --json '["http://127.0.0.1:5001","http://127.0.0.1:5002","http://127.0.0.1:5003","http://127.0.0.1:5004","http://127.0.0.1:5005"]'
ipfs config API.HTTPHeaders.Access-Control-Allow-Methods --json '["PUT", "POST"]'
#!/bin/sh
# Monolithic kubo init script: keys, addresses, bootstrap, peering, RPC CORS,
# gateway and IPNS configuration in a single pass.
# NOTE(review): duplicated by the split configure-*.sh scripts — confirm which
# variant is still in use before editing either copy.
set -ex
# --- keys ---
# generate key for index history if not defined
ipfs key gen dd_root || true
ipfs key gen dd_tamt || true
ipfs key gen dd_tamt_hist || true
ipfs key gen dd_profiles || true
# --- addresses ---
# enable p2p, quic, webtransport, webrtc
# ipfs config Swarm.Transports.Network.Websocket --json true
ipfs config Addresses.Swarm --json '[
"/ip4/0.0.0.0/tcp/4001",
"/ip6/::/tcp/4001",
"/ip4/0.0.0.0/udp/4001/quic-v1",
"/ip6/::/udp/4001/quic-v1",
"/ip4/0.0.0.0/udp/4001/quic-v1/webtransport",
"/ip6/::/udp/4001/quic-v1/webtransport",
"/ip4/0.0.0.0/udp/4001/webrtc-direct",
"/ip6/::/udp/4001/webrtc-direct"
]'
# configure the addresses to announce # TODO announce IPv6
# NOTE(review): port 4001 is hardcoded here, while the split
# configure-addresses.sh uses $KUBO_PORT — confirm intended.
ipfs config Addresses.Announce --json "[
\"/dns/$KUBO_DOMAIN/tcp/4001\",
\"/dns/$KUBO_DOMAIN/udp/4001/quic-v1\",
\"/dns/$KUBO_DOMAIN/udp/4001/quic-v1/webtransport\",
\"/dns/$KUBO_DOMAIN/udp/4001/webrtc-direct\"
]"
# --- peering ---
# remove default bootstrap nodes
ipfs bootstrap rm all
# add custom bootstrap (hugo, poka, aya)
ipfs config Bootstrap --json '[
"/dns/datapod.coinduf.eu/tcp/4001/p2p/12D3KooWFp4JsoDo5FX8CFLtyJjaWWRZ8q3gr8uT2s9To2GYzRNA",
"/dns/gateway.datapod.ipfs.p2p.legal/tcp/4001/p2p/12D3KooWEaBZ3JfeXJayneVdpc71iUYWzeykGxzEq4BFWpPTv5wn",
"/dns/ipfs.asycn.io/tcp/4001/p2p/12D3KooWJnzYzJBtruXZwUQJriF1ePtDQCUQp4aNBV5FjpYVdfhc",
"/dns/datapod.gyroi.de/tcp/4001/p2p/12D3KooWAHf2cyDysXXP1xaAt75dNviRhF2T9QfnQGGZ6kSXvMwK"
]'
# add known peers (poka, hugo, aya)
ipfs config Peering.Peers --json '[
{
"ID": "12D3KooWEaBZ3JfeXJayneVdpc71iUYWzeykGxzEq4BFWpPTv5wn",
"Addrs": ["/dns/gateway.datapod.ipfs.p2p.legal/tcp/4001"]
},
{
"ID": "12D3KooWFp4JsoDo5FX8CFLtyJjaWWRZ8q3gr8uT2s9To2GYzRNA",
"Addrs": ["/dns/datapod.coinduf.eu/tcp/4001"]
},
{
"ID": "12D3KooWJnzYzJBtruXZwUQJriF1ePtDQCUQp4aNBV5FjpYVdfhc",
"Addrs": ["/dns/ipfs.asycn.io/tcp/4001"]
},
{
"ID": "12D3KooWAHf2cyDysXXP1xaAt75dNviRhF2T9QfnQGGZ6kSXvMwK",
"Addrs": ["/dns/datapod.gyroi.de/tcp/4001"]
}
]
'
# configure as a dht to avoid unnecessary http routing
ipfs config Routing.Type dht
# --- rpc ---
# allow easy access through ssh tunnel on port 500x
# ssh -NL 5002:localhost:500x datapod
# ipfs --api=/ip4/127.0.0.1/tcp/500x
ipfs config API.HTTPHeaders.Access-Control-Allow-Origin --json '["http://127.0.0.1:5001","http://127.0.0.1:5002","http://127.0.0.1:5003","http://127.0.0.1:5004","http://127.0.0.1:5005"]'
ipfs config API.HTTPHeaders.Access-Control-Allow-Methods --json '["PUT", "POST"]'
# --- gateway ---
# prevent gateway from fetching foreign data
# ipfs config Gateway.NoFetch --json true
# ipfs config Gateway.NoFetch --json false
# public gateway without subdomain (no wildcard)
# enables /ipfs and /routing (delegated routing)
# public gateway with subdomain (needs wildcard)
ipfs config Gateway.PublicGateways --json "{
\"$KUBO_GATEWAY_DOMAIN\": { \"UseSubdomains\": false, \"Paths\": [\"/ipfs\", \"/routing\"] },
\"$KUBO_GATEWAY_SUBDOMAIN\": { \"UseSubdomains\": true, \"Paths\": [\"/ipfs\", \"/ipns\"] }
}"
ipfs config Gateway.ExposeRoutingAPI --json true
# only reprovide pinned data
# ipfs config Reprovider.Strategy "pinned"
# ipfs config Reprovider.Strategy --json null
# --- IPNS ---
# use pubsub for IPNS records
# ipfs config --json Ipns.UsePubsub true
# republish records frequently
ipfs config --json Ipns.RepublishPeriod '"5m"'
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment