diff --git a/.env.example b/.env.example index d554b30..100e153 100644 --- a/.env.example +++ b/.env.example @@ -55,3 +55,12 @@ ENABLE_DA_TRACKING=false # FAUCET_PRIVATE_KEY=0x... # FAUCET_AMOUNT=0.01 # FAUCET_COOLDOWN_MINUTES=30 + +# Optional snapshot feature (daily pg_dump backups) +# SNAPSHOT_ENABLED=false +# SNAPSHOT_TIME=03:00 # UTC time (HH:MM) to run daily pg_dump +# SNAPSHOT_RETENTION=7 # Number of snapshot files to keep +# SNAPSHOT_DIR=/snapshots # Container path for snapshots +# SNAPSHOT_HOST_DIR=./snapshots # Host path mounted to SNAPSHOT_DIR +# UID=1000 # Optional: host UID for writable snapshot bind mounts +# GID=1000 # Optional: host GID for writable snapshot bind mounts diff --git a/.gitignore b/.gitignore index 9bc3dd2..7125d61 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,9 @@ Thumbs.db *.log logs/ +# Local snapshot test artifacts +snapshots/ + # Node (frontend) frontend/node_modules/ frontend/dist/ diff --git a/backend/Dockerfile b/backend/Dockerfile index 4d827f1..9bd1806 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -11,11 +11,13 @@ RUN cargo build --release # Server image FROM alpine:3.21 AS server -RUN apk add --no-cache ca-certificates +RUN apk add --no-cache ca-certificates postgresql16-client COPY --from=builder /app/target/release/atlas-server /usr/local/bin/ -RUN addgroup -S atlas && adduser -S atlas -G atlas +RUN addgroup -S atlas && adduser -S atlas -G atlas \ + && mkdir -p /snapshots \ + && chown atlas:atlas /snapshots USER atlas EXPOSE 3000 diff --git a/backend/crates/atlas-server/Cargo.toml b/backend/crates/atlas-server/Cargo.toml index 0bb864a..4d8220e 100644 --- a/backend/crates/atlas-server/Cargo.toml +++ b/backend/crates/atlas-server/Cargo.toml @@ -45,3 +45,4 @@ tokio = { workspace = true } tower = { workspace = true, features = ["util"] } serde_json = { workspace = true } sqlx = { workspace = true } +tempfile = "3" diff --git a/backend/crates/atlas-server/src/config.rs 
b/backend/crates/atlas-server/src/config.rs
index 2591c1a..6f0c399 100644
--- a/backend/crates/atlas-server/src/config.rs
+++ b/backend/crates/atlas-server/src/config.rs
@@ -1,6 +1,7 @@
 use alloy::primitives::U256;
 use alloy::signers::local::PrivateKeySigner;
 use anyhow::{bail, Context, Result};
+use chrono::NaiveTime;
 use std::{env, str::FromStr};
 
 const DEFAULT_DA_WORKER_CONCURRENCY: u32 = 50;
@@ -251,6 +252,72 @@ impl FaucetConfig {
     }
 }
 
+#[derive(Clone)]
+pub struct SnapshotConfig {
+    pub enabled: bool,
+    pub time: NaiveTime,
+    pub retention: u32,
+    pub dir: String,
+    pub database_url: String,
+}
+
+impl std::fmt::Debug for SnapshotConfig {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("SnapshotConfig")
+            .field("enabled", &self.enabled)
+            .field("time", &self.time)
+            .field("retention", &self.retention)
+            .field("dir", &self.dir)
+            .field("database_url", &"[redacted]")
+            .finish()
+    }
+}
+
+impl SnapshotConfig {
+    pub fn from_env(database_url: &str) -> Result<Self> {
+        let enabled = env::var("SNAPSHOT_ENABLED")
+            .unwrap_or_else(|_| "false".to_string())
+            .parse::<bool>()
+            .context("Invalid SNAPSHOT_ENABLED")?;
+
+        if !enabled {
+            return Ok(Self {
+                enabled,
+                time: NaiveTime::from_hms_opt(3, 0, 0).unwrap(),
+                retention: 7,
+                dir: "/snapshots".to_string(),
+                database_url: database_url.to_string(),
+            });
+        }
+
+        let time_str = env::var("SNAPSHOT_TIME").unwrap_or_else(|_| "03:00".to_string());
+        let time = NaiveTime::parse_from_str(&time_str, "%H:%M")
+            .context("Invalid SNAPSHOT_TIME (expected HH:MM)")?;
+
+        let retention = env::var("SNAPSHOT_RETENTION")
+            .unwrap_or_else(|_| "7".to_string())
+            .parse::<u32>()
+            .context("Invalid SNAPSHOT_RETENTION")?;
+        if retention == 0 {
+            bail!("SNAPSHOT_RETENTION must be greater than 0");
+        }
+
+        let dir = env::var("SNAPSHOT_DIR").unwrap_or_else(|_| "/snapshots".to_string());
+        let dir = dir.trim().to_string();
+        if dir.is_empty() {
+            bail!("SNAPSHOT_DIR must not be empty");
+        }
+
+        Ok(Self {
+            enabled,
+            time,
+            retention,
+            dir,
+            database_url: database_url.to_string(),
+        })
+    }
+}
+
 fn parse_optional_env(val: Option<String>) -> Option<String> {
     val.map(|s| s.trim().to_string()).filter(|s| !s.is_empty())
 }
@@ -553,6 +620,111 @@ mod tests {
         );
     }
 
+    fn clear_snapshot_env() {
+        env::remove_var("SNAPSHOT_ENABLED");
+        env::remove_var("SNAPSHOT_TIME");
+        env::remove_var("SNAPSHOT_RETENTION");
+        env::remove_var("SNAPSHOT_DIR");
+    }
+
+    #[test]
+    fn snapshot_config_defaults_disabled() {
+        let _lock = ENV_LOCK.lock().unwrap();
+        clear_snapshot_env();
+
+        let config = SnapshotConfig::from_env("postgres://test@localhost/test").unwrap();
+        assert!(!config.enabled);
+        assert_eq!(config.time, NaiveTime::from_hms_opt(3, 0, 0).unwrap());
+        assert_eq!(config.retention, 7);
+        assert_eq!(config.dir, "/snapshots");
+    }
+
+    #[test]
+    fn snapshot_config_parses_valid_time() {
+        let _lock = ENV_LOCK.lock().unwrap();
+        clear_snapshot_env();
+        env::set_var("SNAPSHOT_ENABLED", "true");
+
+        for (input, hour, minute) in [("00:00", 0, 0), ("03:00", 3, 0), ("23:59", 23, 59)] {
+            env::set_var("SNAPSHOT_TIME", input);
+            let config = SnapshotConfig::from_env("postgres://test@localhost/test").unwrap();
+            assert_eq!(
+                config.time,
+                NaiveTime::from_hms_opt(hour, minute, 0).unwrap(),
+                "failed for input {input}"
+            );
+        }
+        clear_snapshot_env();
+    }
+
+    #[test]
+    fn snapshot_config_rejects_invalid_time() {
+        let _lock = ENV_LOCK.lock().unwrap();
+        clear_snapshot_env();
+        env::set_var("SNAPSHOT_ENABLED", "true");
+
+        for val in ["25:00", "abc", "12:60"] {
+            env::set_var("SNAPSHOT_TIME", val);
+            let err = SnapshotConfig::from_env("postgres://test@localhost/test").unwrap_err();
+            assert!(
+                err.to_string().contains("Invalid SNAPSHOT_TIME"),
+                "expected error for {val}, got: {err}"
+            );
+        }
+        clear_snapshot_env();
+    }
+
+    #[test]
+    fn snapshot_config_rejects_zero_retention() {
+        let _lock = ENV_LOCK.lock().unwrap();
+        clear_snapshot_env();
+        env::set_var("SNAPSHOT_ENABLED", "true");
env::set_var("SNAPSHOT_RETENTION", "0"); + + let err = SnapshotConfig::from_env("postgres://test@localhost/test").unwrap_err(); + assert!(err.to_string().contains("must be greater than 0")); + clear_snapshot_env(); + } + + #[test] + fn snapshot_config_custom_dir() { + let _lock = ENV_LOCK.lock().unwrap(); + clear_snapshot_env(); + env::set_var("SNAPSHOT_ENABLED", "true"); + env::set_var("SNAPSHOT_DIR", "/data/backups"); + + let config = SnapshotConfig::from_env("postgres://test@localhost/test").unwrap(); + assert_eq!(config.dir, "/data/backups"); + clear_snapshot_env(); + } + + #[test] + fn snapshot_config_rejects_empty_dir() { + let _lock = ENV_LOCK.lock().unwrap(); + clear_snapshot_env(); + env::set_var("SNAPSHOT_ENABLED", "true"); + env::set_var("SNAPSHOT_DIR", " "); + + let err = SnapshotConfig::from_env("postgres://test@localhost/test").unwrap_err(); + assert!(err.to_string().contains("SNAPSHOT_DIR must not be empty")); + clear_snapshot_env(); + } + + #[test] + fn snapshot_config_debug_redacts_database_url() { + let config = SnapshotConfig { + enabled: true, + time: NaiveTime::from_hms_opt(3, 0, 0).unwrap(), + retention: 7, + dir: "/snapshots".to_string(), + database_url: "postgres://atlas:secret@db/atlas".to_string(), + }; + + let debug = format!("{config:?}"); + assert!(debug.contains("[redacted]")); + assert!(!debug.contains("secret")); + } + #[test] fn faucet_config_rejects_bad_inputs() { let _lock = ENV_LOCK.lock().unwrap(); diff --git a/backend/crates/atlas-server/src/main.rs b/backend/crates/atlas-server/src/main.rs index 2d9cc76..ed2696a 100644 --- a/backend/crates/atlas-server/src/main.rs +++ b/backend/crates/atlas-server/src/main.rs @@ -12,6 +12,7 @@ mod config; mod faucet; mod head; mod indexer; +mod snapshot; /// Retry delays for exponential backoff (in seconds) const RETRY_DELAYS: &[u64] = &[5, 10, 20, 30, 60]; @@ -60,6 +61,7 @@ async fn main() -> Result<()> { dotenvy::dotenv().ok(); let config = config::Config::from_env()?; let faucet_config = 
config::FaucetConfig::from_env()?; + let snapshot_config = config::SnapshotConfig::from_env(&config.database_url)?; let faucet = if faucet_config.enabled { tracing::info!("Faucet enabled"); @@ -181,6 +183,18 @@ async fn main() -> Result<()> { } }); + // Spawn snapshot scheduler if enabled + if snapshot_config.enabled { + tracing::info!("Snapshot scheduler enabled"); + tokio::spawn(async move { + if let Err(e) = + run_with_retry(|| snapshot::run_snapshot_loop(snapshot_config.clone())).await + { + tracing::error!("Snapshot scheduler terminated with error: {}", e); + } + }); + } + // Build and serve API let app = api::build_router(state, config.cors_origin.clone()); let addr = format!("{}:{}", config.api_host, config.api_port); diff --git a/backend/crates/atlas-server/src/snapshot.rs b/backend/crates/atlas-server/src/snapshot.rs new file mode 100644 index 0000000..c145edc --- /dev/null +++ b/backend/crates/atlas-server/src/snapshot.rs @@ -0,0 +1,241 @@ +use anyhow::{bail, Result}; +use chrono::{DateTime, NaiveTime, Utc}; +use std::time::Duration; + +use crate::config::SnapshotConfig; + +const SNAPSHOT_RETRY_DELAYS: &[u64] = &[5, 10, 20, 30, 60]; +const SNAPSHOT_MAX_RETRY_DELAY: u64 = 60; + +/// Calculate duration from `now` until the next occurrence of `target` time (UTC). +/// If `target` has already passed today, returns the duration until tomorrow's `target`. 
+fn duration_until_next(target: NaiveTime, now: DateTime<Utc>) -> Duration {
+    let today_target = now.date_naive().and_time(target).and_utc();
+    let next = if today_target > now {
+        today_target
+    } else {
+        today_target + chrono::Duration::days(1)
+    };
+    (next - now).to_std().expect("positive duration")
+}
+
+fn retry_delay(attempt: usize) -> Duration {
+    Duration::from_secs(
+        SNAPSHOT_RETRY_DELAYS
+            .get(attempt)
+            .copied()
+            .unwrap_or(SNAPSHOT_MAX_RETRY_DELAY),
+    )
+}
+
+fn sleep_duration(target: NaiveTime, now: DateTime<Utc>, retry_attempt: Option<usize>) -> Duration {
+    retry_attempt
+        .map(retry_delay)
+        .unwrap_or_else(|| duration_until_next(target, now))
+}
+
+async fn attempt_snapshot(config: &SnapshotConfig) -> Result<()> {
+    tokio::fs::create_dir_all(&config.dir).await?;
+
+    let timestamp = Utc::now().format("%Y-%m-%dT%H-%M-%S");
+    let filename = format!("atlas_snapshot_{timestamp}.dump");
+    let tmp_path = format!("{}/{filename}.tmp", config.dir);
+    let final_path = format!("{}/{filename}", config.dir);
+
+    tracing::info!(%filename, "Starting database snapshot");
+
+    let status = tokio::process::Command::new("pg_dump")
+        .arg("--dbname")
+        .arg(&config.database_url)
+        .arg("-Fc")
+        .arg("-f")
+        .arg(&tmp_path)
+        .status()
+        .await?;
+
+    if status.success() {
+        tokio::fs::rename(&tmp_path, &final_path).await?;
+        tracing::info!(%filename, "Snapshot complete");
+        cleanup_old_snapshots(&config.dir, config.retention).await;
+        Ok(())
+    } else {
+        let _ = tokio::fs::remove_file(&tmp_path).await;
+        bail!("pg_dump failed with status: {status}");
+    }
+}
+
+/// Run the snapshot scheduler loop.
+/// Snapshot attempts retry with backoff within the same scheduled run so a
+/// transient failure does not skip the day entirely.
+pub async fn run_snapshot_loop(config: SnapshotConfig) -> Result<()> { + tracing::info!( + time = %config.time.format("%H:%M"), + retention = config.retention, + dir = %config.dir, + "Snapshot scheduler started" + ); + + let mut retry_attempt = None; + + loop { + let sleep_dur = sleep_duration(config.time, Utc::now(), retry_attempt); + if let Some(attempt) = retry_attempt { + tracing::warn!( + attempt = attempt + 1, + seconds = sleep_dur.as_secs(), + "Retrying failed snapshot after backoff" + ); + } else { + tracing::info!( + seconds = sleep_dur.as_secs(), + "Sleeping until next snapshot" + ); + } + tokio::time::sleep(sleep_dur).await; + + match attempt_snapshot(&config).await { + Ok(()) => retry_attempt = None, + Err(err) => { + let next_attempt = retry_attempt.map(|attempt| attempt + 1).unwrap_or(0); + tracing::error!( + error = %err, + attempt = next_attempt + 1, + "Snapshot attempt failed" + ); + retry_attempt = Some(next_attempt); + } + } + } +} + +/// Remove old snapshot files, keeping only the newest `retention` count. 
+async fn cleanup_old_snapshots(dir: &str, retention: u32) { + let mut files = Vec::new(); + let Ok(mut entries) = tokio::fs::read_dir(dir).await else { + return; + }; + + while let Ok(Some(entry)) = entries.next_entry().await { + let name = entry.file_name(); + let name = name.to_string_lossy(); + if name.starts_with("atlas_snapshot_") && name.ends_with(".dump") && !name.ends_with(".tmp") + { + files.push(entry.path()); + } + } + + // Sort descending (newest first) — timestamp in filename gives lexicographic order + files.sort(); + files.reverse(); + + for old in files.into_iter().skip(retention as usize) { + tracing::info!(path = %old.display(), "Removing old snapshot"); + if let Err(e) = tokio::fs::remove_file(&old).await { + tracing::warn!(path = %old.display(), error = %e, "Failed to remove old snapshot"); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use chrono::TimeZone; + + #[test] + fn duration_until_next_target_in_future_today() { + let now = Utc.with_ymd_and_hms(2026, 3, 19, 10, 0, 0).unwrap(); + let target = NaiveTime::from_hms_opt(15, 0, 0).unwrap(); + let dur = duration_until_next(target, now); + assert_eq!(dur, Duration::from_secs(5 * 3600)); // 5 hours + } + + #[test] + fn duration_until_next_target_already_passed() { + let now = Utc.with_ymd_and_hms(2026, 3, 19, 16, 0, 0).unwrap(); + let target = NaiveTime::from_hms_opt(3, 0, 0).unwrap(); + let dur = duration_until_next(target, now); + assert_eq!(dur, Duration::from_secs(11 * 3600)); // 11 hours until 03:00 next day + } + + #[test] + fn duration_until_next_target_exactly_now_wraps_to_tomorrow() { + let now = Utc.with_ymd_and_hms(2026, 3, 19, 3, 0, 0).unwrap(); + let target = NaiveTime::from_hms_opt(3, 0, 0).unwrap(); + let dur = duration_until_next(target, now); + assert_eq!(dur, Duration::from_secs(24 * 3600)); // full 24 hours + } + + #[test] + fn sleep_duration_uses_schedule_when_not_retrying() { + let now = Utc.with_ymd_and_hms(2026, 3, 19, 16, 0, 0).unwrap(); + let target = 
NaiveTime::from_hms_opt(3, 0, 0).unwrap(); + let dur = sleep_duration(target, now, None); + assert_eq!(dur, Duration::from_secs(11 * 3600)); + } + + #[test] + fn sleep_duration_uses_retry_backoff_after_failure() { + let now = Utc.with_ymd_and_hms(2026, 3, 19, 16, 0, 0).unwrap(); + let target = NaiveTime::from_hms_opt(3, 0, 0).unwrap(); + let dur = sleep_duration(target, now, Some(0)); + assert_eq!(dur, Duration::from_secs(5)); + } + + #[tokio::test] + async fn cleanup_keeps_only_retention_count() { + let dir = tempfile::tempdir().unwrap(); + let dir_path = dir.path().to_str().unwrap(); + + // Create 5 snapshot files with different timestamps + for i in 1..=5 { + let path = dir + .path() + .join(format!("atlas_snapshot_2026-03-{i:02}T03-00-00.dump")); + tokio::fs::write(&path, b"test").await.unwrap(); + } + + // Also create a .tmp file that should be ignored + let tmp = dir + .path() + .join("atlas_snapshot_2026-03-06T03-00-00.dump.tmp"); + tokio::fs::write(&tmp, b"tmp").await.unwrap(); + + cleanup_old_snapshots(dir_path, 3).await; + + let mut remaining = Vec::new(); + let mut entries = tokio::fs::read_dir(dir_path).await.unwrap(); + while let Some(entry) = entries.next_entry().await.unwrap() { + remaining.push(entry.file_name().to_string_lossy().to_string()); + } + remaining.sort(); + + // Should keep 3 newest + the .tmp file + assert_eq!(remaining.len(), 4); + assert!(remaining.contains(&"atlas_snapshot_2026-03-03T03-00-00.dump".to_string())); + assert!(remaining.contains(&"atlas_snapshot_2026-03-04T03-00-00.dump".to_string())); + assert!(remaining.contains(&"atlas_snapshot_2026-03-05T03-00-00.dump".to_string())); + assert!(remaining.contains(&"atlas_snapshot_2026-03-06T03-00-00.dump.tmp".to_string())); + } + + #[tokio::test] + async fn cleanup_noop_when_under_retention() { + let dir = tempfile::tempdir().unwrap(); + let dir_path = dir.path().to_str().unwrap(); + + for i in 1..=2 { + let path = dir + .path() + 
+                .join(format!("atlas_snapshot_2026-03-{i:02}T03-00-00.dump"));
+            tokio::fs::write(&path, b"test").await.unwrap();
+        }
+
+        cleanup_old_snapshots(dir_path, 5).await;
+
+        let mut count = 0;
+        let mut entries = tokio::fs::read_dir(dir_path).await.unwrap();
+        while entries.next_entry().await.unwrap().is_some() {
+            count += 1;
+        }
+        assert_eq!(count, 2);
+    }
+}
diff --git a/backend/crates/atlas-server/tests/integration/common.rs b/backend/crates/atlas-server/tests/integration/common.rs
index da128f6..ad8b4a0 100644
--- a/backend/crates/atlas-server/tests/integration/common.rs
+++ b/backend/crates/atlas-server/tests/integration/common.rs
@@ -13,6 +13,7 @@ use atlas_server::head::HeadTracker;
 
 struct TestEnv {
     runtime: tokio::runtime::Runtime,
     pool: PgPool,
+    database_url: String,
     _container: ContainerAsync<Postgres>,
 }
 
@@ -20,7 +21,7 @@ static ENV: LazyLock<TestEnv> = LazyLock::new(|| {
     let runtime = tokio::runtime::Runtime::new().expect("create test runtime");
 
-    let (pool, container) = runtime.block_on(async {
+    let (pool, container, database_url) = runtime.block_on(async {
         let container = Postgres::default()
             .start()
             .await
@@ -42,12 +43,13 @@ static ENV: LazyLock<TestEnv> = LazyLock::new(|| {
         .await
         .expect("Failed to run migrations");
 
-        (pool, container)
+        (pool, container, database_url)
     });
 
     TestEnv {
         runtime,
         pool,
+        database_url,
         _container: container,
     }
 });
@@ -56,6 +58,10 @@ pub fn pool() -> &'static PgPool {
     &ENV.pool
 }
 
+pub fn database_url() -> &'static str {
+    &ENV.database_url
+}
+
 pub fn test_router() -> Router {
     let pool = pool().clone();
     let head_tracker = Arc::new(HeadTracker::empty(10));
diff --git a/backend/crates/atlas-server/tests/integration/main.rs b/backend/crates/atlas-server/tests/integration/main.rs
index cb27ff0..ceede08 100644
--- a/backend/crates/atlas-server/tests/integration/main.rs
+++ b/backend/crates/atlas-server/tests/integration/main.rs
@@ -4,6 +4,7 @@ mod addresses;
 mod blocks;
 mod nfts;
 mod search;
+mod snapshots;
 mod status;
 mod tokens;
 mod
transactions; diff --git a/backend/crates/atlas-server/tests/integration/snapshots.rs b/backend/crates/atlas-server/tests/integration/snapshots.rs new file mode 100644 index 0000000..8304bfa --- /dev/null +++ b/backend/crates/atlas-server/tests/integration/snapshots.rs @@ -0,0 +1,95 @@ +use crate::common; + +/// Check if a command is available on PATH. +fn has_command(cmd: &str) -> bool { + std::process::Command::new("which") + .arg(cmd) + .output() + .map(|o| o.status.success()) + .unwrap_or(false) +} + +#[test] +#[ignore] // Requires pg_dump and pg_restore on PATH +fn snapshot_dump_and_restore_round_trip() { + if !has_command("pg_dump") || !has_command("pg_restore") { + eprintln!("Skipping: pg_dump/pg_restore not found on PATH"); + return; + } + + common::run(async { + let pool = common::pool(); + let db_url = common::database_url(); + + // Insert test data + sqlx::query("INSERT INTO indexer_state (key, value) VALUES ('snapshot_test', 'hello') ON CONFLICT (key) DO UPDATE SET value = 'hello'") + .execute(pool) + .await + .expect("insert test data"); + + // pg_dump to temp file + let dir = tempfile::tempdir().expect("create temp dir"); + let dump_path = dir.path().join("test_snapshot.dump"); + + let dump_status = tokio::process::Command::new("pg_dump") + .arg("--dbname") + .arg(db_url) + .arg("-Fc") + .arg("-f") + .arg(&dump_path) + .status() + .await + .expect("spawn pg_dump"); + + assert!(dump_status.success(), "pg_dump failed: {dump_status}"); + + let metadata = tokio::fs::metadata(&dump_path) + .await + .expect("stat dump file"); + assert!(metadata.len() > 0, "dump file is empty"); + + // Create a separate database for restore + sqlx::query("CREATE DATABASE test_restore") + .execute(pool) + .await + .expect("create test_restore database"); + + let restore_url = db_url.replace("/postgres", "/test_restore"); + + // pg_restore into the new database + let restore_status = tokio::process::Command::new("pg_restore") + .arg("--dbname") + .arg(&restore_url) + 
.arg(&dump_path) + .status() + .await + .expect("spawn pg_restore"); + + assert!( + restore_status.success(), + "pg_restore failed: {restore_status}" + ); + + // Verify data in restored database + let restore_pool = sqlx::postgres::PgPoolOptions::new() + .max_connections(1) + .connect(&restore_url) + .await + .expect("connect to restored database"); + + let row: (String,) = + sqlx::query_as("SELECT value FROM indexer_state WHERE key = 'snapshot_test'") + .fetch_one(&restore_pool) + .await + .expect("query restored data"); + + assert_eq!(row.0, "hello"); + + // Cleanup + restore_pool.close().await; + sqlx::query("DROP DATABASE test_restore") + .execute(pool) + .await + .expect("drop test_restore database"); + }); +} diff --git a/docker-compose.yml b/docker-compose.yml index 4ce3d37..98159b3 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -43,9 +43,16 @@ services: BACKGROUND_COLOR_LIGHT: ${BACKGROUND_COLOR_LIGHT:-} SUCCESS_COLOR: ${SUCCESS_COLOR:-} ERROR_COLOR: ${ERROR_COLOR:-} + SNAPSHOT_ENABLED: ${SNAPSHOT_ENABLED:-false} + SNAPSHOT_TIME: ${SNAPSHOT_TIME:-03:00} + SNAPSHOT_RETENTION: ${SNAPSHOT_RETENTION:-7} + SNAPSHOT_DIR: ${SNAPSHOT_DIR:-/snapshots} API_HOST: 0.0.0.0 API_PORT: 3000 RUST_LOG: atlas_server=info,tower_http=info + user: "${UID:-1000}:${GID:-1000}" + volumes: + - ${SNAPSHOT_HOST_DIR:-./snapshots}:${SNAPSHOT_DIR:-/snapshots} depends_on: postgres: condition: service_healthy