Commit fa50d4f2 authored by poka

feat: can switch dev/prod

parent 2a5863b6
FROM denoland/deno:alpine
WORKDIR /app
COPY index.ts .
COPY lib ./lib
ENV PRODUCTION=true
# cache deps
RUN deno cache index.ts
EXPOSE 3000
CMD ["deno", "run", "--allow-env", "--allow-read", "--allow-write", "--allow-net", "index.ts"]
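
The Dockerfile above presumably builds the duniter-datapod-app:latest image that docker-compose.prod.yml references below: it caches the Deno dependencies of index.ts, bakes PRODUCTION=true into the image, and exposes the app on port 3000. The file's actual name and path are not visible in this view, so the -f path in the build command is a placeholder:

# Placeholder path; tag taken from docker-compose.prod.yml
docker build -f <path/to/app/Dockerfile> -t duniter-datapod-app:latest .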
FROM hasura/graphql-engine:v2.36.0
COPY ./hasura /hasura
COPY ./scripts/init-hasura.sh /init-hasura.sh
COPY ./config.yaml /config.yaml
RUN curl -sL https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | bash 2>/dev/null
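
This second Dockerfile wraps hasura/graphql-engine:v2.36.0, copies the hasura metadata/migrations, the init script and config.yaml into the image, and installs the Hasura CLI at build time, which is presumably why the runtime CLI download is dropped from init-hasura.sh further down. It appears to produce the duniter-datapod-hasura:latest image used by the updated docker-compose.yml; again, the Dockerfile path below is a placeholder:

# Placeholder path; tag taken from the updated docker-compose.yml
docker build -f <path/to/hasura/Dockerfile> -t duniter-datapod-hasura:latest .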
version: "3.6"
services:
  postgres-datapod:
    ports:
      - "5432:5432"
  graphql-engine-datapod:
    extra_hosts:
      - "host.docker.internal:host-gateway"
    environment:
      PRODUCTION: "false"
    volumes:
      - ./hasura:/hasura
      - ./scripts/init-hasura.sh:/init-hasura.sh
      - ./config.yaml:/config.yaml
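
The compose fragment above is presumably the docker-compose.override.yml referenced by load.sh further down. It is the development overlay: it publishes Postgres on 5432, maps host.docker.internal to the host gateway so the Hasura action handler can reach the Deno app running on the host, sets PRODUCTION to "false", and bind-mounts the hasura metadata, init script and config so migrations can be recorded in git. Since Docker Compose merges docker-compose.override.yml automatically, plain commands are enough in dev mode:

docker compose up -d      # docker-compose.yml + docker-compose.override.yml merged automatically
docker compose config     # inspect the effective merged configuration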
version: "3.6"
services:
  graphql-engine-datapod:
    environment:
      PRODUCTION: "true"
  datapod-app:
    image: duniter-datapod-app:latest
    depends_on:
      postgres-datapod:
        condition: service_healthy
    restart: always
    environment:
      DB_USER: ${DB_USER}
      DB_PASSWORD: ${DB_PASSWORD}
      DB_DATABASE: ${DB_DATABASE}
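
The fragment above is presumably docker-compose.prod.yml: it forces PRODUCTION to "true" for the Hasura container and adds the datapod-app service built from the Deno image above. Unlike docker-compose.override.yml, a *.prod.yml file is not loaded automatically, so it has to be passed explicitly, exactly as load.sh does:

docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d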
 version: "3.6"
 services:
-  postgres:
+  postgres-datapod:
     image: postgres:15
     restart: always
     volumes:
       - db_data:/var/lib/postgresql/data
-      # - ./hasura/migrations:/docker-entrypoint-initdb.d
-    ports:
-      - "5432:5432"
     environment:
       POSTGRES_USER: ${DB_USER:-postgres}
       POSTGRES_PASSWORD: ${DB_PASSWORD:-postgrespassword}
@@ -18,39 +15,21 @@ services:
       timeout: 2s
       retries: 5
-  graphql-engine:
-    image: hasura/graphql-engine:v2.36.0
+  graphql-engine-datapod:
+    image: duniter-datapod-hasura:latest
     depends_on:
-      postgres:
+      postgres-datapod:
         condition: service_healthy
+    restart: always
     ports:
       - "8080:8080"
-    restart: always
-    # network_mode: "host"
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
     environment:
-      ## postgres database to store Hasura metadata
-      HASURA_GRAPHQL_DATABASE_URL: postgres://${DB_USER:-postgres}:${DB_PASSWORD:-postgrespassword}@postgres:5432/${DB_DATABASE:-postgres}
+      HASURA_GRAPHQL_DATABASE_URL: postgres://${DB_USER:-postgres}:${DB_PASSWORD:-postgrespassword}@postgres-datapod:5432/${DB_DATABASE:-postgres}
-      ## enable the console served by server
-      HASURA_GRAPHQL_ENABLE_CONSOLE: "true" # set to "false" to disable console
+      HASURA_GRAPHQL_ENABLE_CONSOLE: "true"
-      ## enable debugging mode. It is recommended to disable this in production
       HASURA_GRAPHQL_DEV_MODE: "true"
-      HASURA_GRAPHQL_ENABLED_LOG_TYPES: startup, http-log, webhook-log, websocket-log, query-log
-      ## uncomment next line to run console offline (i.e load console assets from server instead of CDN)
-      # HASURA_GRAPHQL_CONSOLE_ASSETS_DIR: /srv/console-assets
-      ## uncomment next line to set an admin secret
       HASURA_GRAPHQL_ADMIN_SECRET: ${HASURA_GRAPHQL_ADMIN_SECRET}
-      # HASURA_GRAPHQL_METADATA_DEFAULTS: '{"backend_configs":{"dataconnector":{"athena":{"uri":"http://data-connector-agent:8081/api/v1/athena"},"mariadb":{"uri":"http://data-connector-agent:8081/api/v1/mariadb"},"mysql8":{"uri":"http://data-connector-agent:8081/api/v1/mysql"},"oracle":{"uri":"http://data-connector-agent:8081/api/v1/oracle"},"snowflake":{"uri":"http://data-connector-agent:8081/api/v1/snowflake"}}}}'
-      ## Name of role when the Authorization header is absent in JWT
       HASURA_GRAPHQL_UNAUTHORIZED_ROLE: public
-      ## Disable telemetry
       HASURA_GRAPHQL_ENABLE_TELEMETRY: "false"
-      ## Define metadata and migrations directories
-    volumes: # for local developement, you want to record the database migrations in git
-      - ./hasura:/hasura
-      - ./scripts/init-hasura.sh:/init-hasura.sh
-      - ./config.yaml:/config.yaml
     command: sh /init-hasura.sh
 volumes:
...
...
@@ -3,21 +3,37 @@ import { Client } from "https://deno.land/x/postgres@v0.17.0/mod.ts";
 import { load } from "https://deno.land/std@0.209.0/dotenv/mod.ts";
 import { updateProfile } from "./lib/update_profile.ts";

+let dbUser, dbDatabase, dbPassword, dbHostname;
+const dbPort = 5432;
+const isProduction = Deno.env.get("PRODUCTION") === "true";
+
+if (isProduction) {
+  dbUser = Deno.env.get("DB_USER");
+  dbDatabase = Deno.env.get("DB_DATABASE");
+  dbPassword = Deno.env.get("DB_PASSWORD");
+  dbHostname = "postgres-datapod";
+} else {
-const env = await load();
+  const env = await load();
+  dbUser = env["DB_USER"];
+  dbDatabase = env["DB_DATABASE"];
+  dbPassword = env["DB_PASSWORD"];
+  dbHostname = "localhost";
+}

 const client = new Client({
-  user: env["DB_USER"],
+  user: dbUser,
-  database: env["DB_DATABASE"],
+  database: dbDatabase,
-  hostname: "localhost",
+  hostname: dbHostname,
-  password: env["DB_PASSWORD"],
+  password: dbPassword,
-  port: 5432,
+  port: dbPort,
 });
 await client.connect()

 const app = new Application();
 const router = new Router();

-// Manage route /update-profile-data
+// Manage routes
 router.post("/update-profile-data", async (ctx: Context) => await updateProfile(ctx, client));

 app.use(router.routes());
...
...
@@ -9,8 +9,6 @@ export async function updateProfile(ctx: Context, client: Client) {
   const bodyValue = body.variables ? body.variables : (body.input ? body.input : {});
   const { address, hash, signature, avatarBase64, description, geoloc } = bodyValue;

-  // console.log(bodyValue);
-
   // Verify signature
   const playload = JSON.stringify({description, avatarBase64, geoloc});
   if (!await verifySignature(address, signature, hash, playload)) {
...
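
For reference, the handler takes its fields from body.variables or body.input (address, hash, signature, avatarBase64, description, geoloc) and rejects the request when the signature over {description, avatarBase64, geoloc} does not verify. A hedged manual test against the Deno app, assuming it parses a JSON body and listens on port 3000 as in the Dockerfile; the placeholder values below only show the request shape and will fail signature verification:

# Hypothetical smoke test of the update-profile-data route; values are placeholders
curl -X POST http://localhost:3000/update-profile-data \
  -H 'Content-Type: application/json' \
  -d '{"input":{"address":"<ss58 address>","hash":"<hash>","signature":"<sig>","avatarBase64":null,"description":"hello","geoloc":null}}'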
load.sh 0 → 100755
#!/bin/bash
option=$1
log=$2
if [[ $option == "dev" ]]; then
  echo "Start datapod in dev mode"
  docker compose -f docker-compose.yml -f docker-compose.prod.yml -f docker-compose.override.yml down -v
  docker compose up -d
  deno run --allow-env --allow-read --allow-write --allow-net --watch index.ts
else
  echo "Start datapod in production mode"
  docker compose -f docker-compose.yml -f docker-compose.prod.yml -f docker-compose.override.yml down
  docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d
  [[ $log == "log" ]] && docker compose -f docker-compose.yml -f docker-compose.prod.yml logs -f
fi
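
load.sh is added as an executable (mode 100755) entry point for switching modes: with dev as first argument it tears the whole stack down (including volumes), starts the compose services with the dev override, and runs the Deno app on the host with --watch; any other value starts the production stack, optionally following the logs when log is passed as second argument:

./load.sh dev        # dev mode: compose stack + Deno app on the host with --watch
./load.sh prod       # production mode (any first argument other than dev)
./load.sh prod log   # production mode, then follow the container logs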
...
@@ -3,8 +3,6 @@
 # start hasura in background
 graphql-engine serve &

-[ ! $(which hasura) ] && curl -sL https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | bash
-
 endpoint="http://localhost:8080"

 check_hasura_ready() {
@@ -18,6 +16,11 @@ while [ $(check_hasura_ready) -ne 200 ]; do
 done
 echo "Hasura is ready."

+if [[ $PRODUCTION == "true" ]]; then
+  sed -i 's/host.docker.internal:3000/datapod-app:3000/g' hasura/metadata/actions.yaml
+else
+  sed -i 's/datapod-app:3000/host.docker.internal:3000/g' hasura/metadata/actions.yaml
+fi
 hasura migrate apply --endpoint $endpoint --admin-secret $HASURA_GRAPHQL_ADMIN_SECRET --database-name default
 hasura metadata apply --endpoint $endpoint --admin-secret $HASURA_GRAPHQL_ADMIN_SECRET
...
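
The new sed step switches the Hasura action handler host between host.docker.internal:3000 (dev, app running on the host) and datapod-app:3000 (prod, app running as the datapod-app container) before the metadata is applied. A quick, hedged way to check which handler is currently written in the metadata, using the same relative path as the script:

# Show the handler host currently present in the action metadata
grep -n '3000' hasura/metadata/actions.yaml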