diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 9ed05dc..5aa8b03 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -4,6 +4,7 @@ on: branches: ["latest"] paths: - src/** + - crates/** - Cargo.toml - Cargo.lock - .github/workflows/test.yaml @@ -46,8 +47,12 @@ jobs: ~/.cargo/bin target - - name: consult Clippy - run: cargo clippy --all-targets --features postgres,vsock + - name: Lint workspace (clippy) + run: | + cargo clippy --workspace --all-targets + cargo clippy -p zerolease --all-targets --features vsock,kms + cargo clippy -p zerolease-provider --all-targets --features vault + cargo clippy --manifest-path crates/zerolease-store-postgres/Cargo.toml --all-targets - name: Install cargo-nextest shell: bash @@ -58,13 +63,16 @@ jobs: mv cargo-nextest /home/runner/.cargo/bin fi - - name: Run tests (including vsock) - run: cargo nextest run --features vsock + - name: Run workspace tests + run: cargo nextest run --workspace + + - name: Run vsock tests + run: cargo nextest run -p zerolease --features vsock - name: Run PostgreSQL integration tests env: DATABASE_URL: postgres://zerolease:zerolease@localhost/zerolease_test - run: cargo nextest run --features postgres store::postgres::tests -E 'test(store::postgres)' --run-ignored ignored-only --test-threads=1 + run: cargo nextest run --manifest-path crates/zerolease-store-postgres/Cargo.toml --run-ignored ignored-only --test-threads=1 - name: Run KMS integration tests env: @@ -73,10 +81,12 @@ jobs: AWS_REGION: ${{ secrets.AWS_REGION }} ZEROLEASE_KMS_TEST_KEY_ID: alias/zerolease-test ZEROLEASE_KMS_TEST_REGION: us-west-2 - run: cargo nextest run --features kms keysource::kms -E 'test(keysource::kms)' --run-ignored ignored-only --test-threads=1 + run: cargo nextest run -p zerolease --features kms -E 'test(keysource::kms)' --run-ignored ignored-only --test-threads=1 - name: Run doctests - run: cargo test --doc + run: | + cargo test --workspace --doc + cargo test 
--manifest-path crates/zerolease-store-postgres/Cargo.toml --doc coverage: name: coverage @@ -108,17 +118,20 @@ jobs: - name: Install cargo-llvm-cov uses: taiki-e/install-action@cargo-llvm-cov - - name: Generate coverage (default + vsock tests) - run: cargo llvm-cov --features vsock,postgres --lcov --output-path lcov-default.info + - name: Generate coverage (workspace tests) + run: cargo llvm-cov --workspace --lcov --output-path lcov-workspace.info + + - name: Generate coverage (vsock tests) + run: cargo llvm-cov -p zerolease --features vsock --lcov --output-path lcov-vsock.info --no-clean - name: Generate coverage (PostgreSQL tests) env: DATABASE_URL: postgres://zerolease:zerolease@localhost/zerolease_test - run: cargo llvm-cov --features postgres --lcov --output-path lcov-postgres.info -- store::postgres --ignored --test-threads=1 + run: cargo llvm-cov --manifest-path crates/zerolease-store-postgres/Cargo.toml --lcov --output-path lcov-postgres.info -- --ignored --test-threads=1 - name: Upload coverage to Codecov uses: codecov/codecov-action@v5 with: - files: lcov-default.info,lcov-postgres.info + files: lcov-workspace.info,lcov-vsock.info,lcov-postgres.info fail_ci_if_error: false token: ${{ secrets.CODECOV_TOKEN }} diff --git a/Cargo.toml b/Cargo.toml index 9d13685..12221e6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,64 +1,87 @@ +[workspace] +members = [".", "crates/zerolease-provider", "crates/zerolease-store-rusqlite"] +# zerolease-store-postgres excluded from default workspace due to sqlx v0.8 +# libsqlite3-sys conflict with rusqlite. 
Build/test it separately: +# cargo check -p zerolease-store-postgres --manifest-path crates/zerolease-store-postgres/Cargo.toml +exclude = ["crates/zerolease-store-postgres"] +resolver = "3" + +[workspace.package] +edition = "2024" +authors = ["C J Silverio "] +license = "Apache-2.0" +repository = "https://github.com/ceejbot/zerolease" + +[workspace.lints.rust] +unsafe_code = { level = "deny", priority = 0 } +future_incompatible = { level = "deny", priority = 1 } +rust_2018_idioms = { level = "warn", priority = 2 } +trivial_casts = { level = "warn", priority = 3 } +trivial_numeric_casts = { level = "warn", priority = 4 } +unused_lifetimes = { level = "warn", priority = 5 } +unused_qualifications = { level = "warn", priority = 6 } + +[workspace.lints.clippy] +unwrap_used = "deny" + +[workspace.dependencies] +async-trait = "0.1" +chrono = { version = "0.4", features = ["serde"] } +secrecy = { version = "0.10", features = ["serde"] } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +thiserror = "2" +tokio = { version = "1", features = ["full"] } +uuid = { version = "1", features = ["v7", "serde"] } +zerolease = { path = ".", default-features = false } + [package] name = "zerolease" -authors = ["C J Silverio "] +authors.workspace = true version = "0.1.0" -edition = "2024" +edition.workspace = true description = "A lightweight, agent-aware credential vault with lease-based access control" -license = "Apache-2.0" -repository = "https://github.com/ceejbot/zerolease" +license.workspace = true +repository.workspace = true keywords = ["credentials", "vault", "security", "agents", "secrets"] readme = "README.md" categories = ["authentication", "cryptography"] [dependencies] aes-gcm = "0.10" -async-trait = "0.1" +async-trait.workspace = true aws-config = { version = "1", optional = true } aws-sdk-kms = { version = "1", optional = true } base64 = "0.22" chacha20poly1305 = "0.10" -chrono = { version = "0.4", features = ["serde"] } -secrecy = { version = 
"0.10", features = ["serde"] } -serde = { version = "1", features = ["derive"] } -serde_json = "1" -thiserror = "2" -tokio = { version = "1", features = ["full"] } +chrono.workspace = true +secrecy.workspace = true +serde.workspace = true +serde_json.workspace = true +thiserror.workspace = true +tokio.workspace = true tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } -uuid = { version = "1", features = ["v7", "serde"] } +uuid.workspace = true zeroize = { version = "1.8", features = ["derive"] } -[dependencies.sqlx] -version = "0.8" -features = ["runtime-tokio", "sqlite", "postgres", "chrono", "uuid"] -optional = true - [features] -default = ["sqlite"] -sqlite = ["sqlx"] -postgres = ["sqlx"] +default = [] +# Gates integration tests that depend on the in-tree sqlx-based stores +# (not yet extracted to zerolease-store-sqlx). Unused for now. +sqlite = [] vsock = ["tokio-vsock"] kms = ["aws-sdk-kms", "aws-config"] [target.'cfg(unix)'.dependencies] -# OS keychain (Linux secret-service, macOS Keychain) keyring = "3.6" [target.'cfg(target_os = "linux")'.dependencies] -# vsock for QEMU and Firecracker host-guest communication tokio-vsock = { version = "0.7", optional = true } [dev-dependencies] tempfile = "3" +zerolease-store-rusqlite = { path = "crates/zerolease-store-rusqlite" } -[lints.rust] -unsafe_code = { level = "deny", priority = 0 } -future_incompatible = { level = "deny", priority = 1 } -rust_2018_idioms = { level = "warn", priority = 2 } -trivial_casts = { level = "warn", priority = 3 } -trivial_numeric_casts = { level = "warn", priority = 4 } -unused_lifetimes = { level = "warn", priority = 5 } -unused_qualifications = { level = "warn", priority = 6 } - -[lints.clippy] -unwrap_used = "deny" +[lints] +workspace = true diff --git a/crates/zerolease-provider/Cargo.toml b/crates/zerolease-provider/Cargo.toml new file mode 100644 index 0000000..29bfc7f --- /dev/null +++ b/crates/zerolease-provider/Cargo.toml @@ -0,0 +1,22 @@ 
+[package] +name = "zerolease-provider" +version = "0.1.0" +description = "CredentialProvider trait for lease-based credential access in AI agent tools" +edition.workspace = true +authors.workspace = true +license.workspace = true +repository.workspace = true + +[dependencies] +async-trait.workspace = true +secrecy.workspace = true +thiserror.workspace = true +tokio = { workspace = true, features = ["sync", "rt"] } +uuid.workspace = true +zerolease = { workspace = true, default-features = false, optional = true } + +[features] +vault = ["dep:zerolease"] + +[lints] +workspace = true diff --git a/crates/zerolease-provider/src/credential.rs b/crates/zerolease-provider/src/credential.rs new file mode 100644 index 0000000..4d1ad41 --- /dev/null +++ b/crates/zerolease-provider/src/credential.rs @@ -0,0 +1,92 @@ +//! Credential guard: a zeroize-on-drop, revoke-on-drop handle to a secret. +//! +//! `CredentialGuard` wraps a `SecretString` obtained through a zerolease +//! lease. When the guard is dropped, the secret is zeroized from memory +//! and the lease revocation is requested via a background channel. +//! +//! The `expose()` closure pattern prevents callers from storing the +//! credential in a variable that outlives the guard. + +use secrecy::{ExposeSecret, SecretString}; +use tokio::sync::mpsc; +use uuid::Uuid; + +/// A handle to an active credential. The secret value is accessible +/// only through [`expose()`](Self::expose) and is zeroized when this +/// guard drops. The underlying lease is revoked on drop. +/// +/// NOT Clone, NOT Serialize. Debug redacts the secret value. +pub struct CredentialGuard { + secret: SecretString, + lease_id: Uuid, + target_domain: String, + revoke_tx: Option>, +} + +impl CredentialGuard { + /// Create a guard backed by a vault lease with a revocation channel. 
+ #[allow(dead_code)] // we have library users + pub(crate) fn new( + secret: SecretString, + lease_id: Uuid, + target_domain: String, + revoke_tx: mpsc::Sender, + ) -> Self { + Self { + secret, + lease_id, + target_domain, + revoke_tx: Some(revoke_tx), + } + } + + /// Create a guard with no revocation channel (for static/test providers). + pub(crate) fn new_static(secret: SecretString, target_domain: String) -> Self { + Self { + secret, + lease_id: Uuid::now_v7(), + target_domain, + revoke_tx: None, + } + } + + /// Access the secret value. The closure receives a `&str` that + /// must not be stored beyond the closure's scope. + pub fn expose(&self, f: F) -> R + where + F: FnOnce(&str) -> R, + { + f(self.secret.expose_secret()) + } + + /// The domain this credential is scoped to. + pub fn target_domain(&self) -> &str { + &self.target_domain + } + + /// The lease ID backing this credential. + pub fn lease_id(&self) -> Uuid { + self.lease_id + } +} + +impl std::fmt::Debug for CredentialGuard { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("CredentialGuard") + .field("lease_id", &self.lease_id) + .field("target_domain", &self.target_domain) + .field("secret", &"[REDACTED]") + .finish() + } +} + +impl Drop for CredentialGuard { + fn drop(&mut self) { + if let Some(tx) = self.revoke_tx.take() { + // Best-effort: if the channel is full or closed, we can't + // block in Drop. The lease will expire on its own via TTL. + let _ = tx.try_send(self.lease_id); + } + // SecretString handles zeroization of the secret value. + } +} diff --git a/crates/zerolease-provider/src/error.rs b/crates/zerolease-provider/src/error.rs new file mode 100644 index 0000000..b2f8900 --- /dev/null +++ b/crates/zerolease-provider/src/error.rs @@ -0,0 +1,22 @@ +//! Error types for the credential provider. +//! +//! These errors intentionally do not expose vault internals. +//! A tool sees "credential unavailable" — never lease IDs, +//! 
policy details, or encryption errors. + +#[derive(Debug, thiserror::Error)] +pub enum ProviderError { + /// The requested credential could not be acquired. + /// This covers policy denial, missing secrets, and vault errors — + /// intentionally vague to avoid leaking vault internals. + #[error("credential unavailable: {0}")] + Unavailable(String), + + /// The provider could not connect to the vault. + #[error("vault connection failed: {0}")] + ConnectionFailed(String), + + /// The credential value was not valid UTF-8. + #[error("credential is not valid UTF-8")] + InvalidUtf8, +} diff --git a/crates/zerolease-provider/src/lib.rs b/crates/zerolease-provider/src/lib.rs new file mode 100644 index 0000000..dc31c80 --- /dev/null +++ b/crates/zerolease-provider/src/lib.rs @@ -0,0 +1,24 @@ +//! zerolease-provider: a `CredentialProvider` trait for AI agent tools. +//! +//! Instead of storing credentials as static `String` fields for the +//! process lifetime, tools call `provider.acquire()` at execution time +//! and receive a `CredentialGuard` that is time-bounded, domain-scoped, +//! and zeroized on drop. +//! +//! The `ZeroleaseProvider` implementation connects to a zerolease vault +//! server over UDS. Other backends (static config, HashiCorp Vault, etc.) +//! can implement the same trait. + +pub mod credential; +pub mod error; +pub mod provider; +pub mod static_provider; +#[cfg(feature = "vault")] +pub mod zerolease_provider; + +pub use credential::CredentialGuard; +pub use error::ProviderError; +pub use provider::{CredentialProvider, CredentialRequest}; +pub use static_provider::StaticProvider; +#[cfg(feature = "vault")] +pub use zerolease_provider::ZeroleaseProvider; diff --git a/crates/zerolease-provider/src/provider.rs b/crates/zerolease-provider/src/provider.rs new file mode 100644 index 0000000..f7fa3ae --- /dev/null +++ b/crates/zerolease-provider/src/provider.rs @@ -0,0 +1,37 @@ +//! The `CredentialProvider` trait: the bridge between AI agent tools +//! 
and a credential vault. +//! +//! Tools call `provider.acquire()` at execution time to get a +//! `CredentialGuard` — a time-bounded, domain-scoped, zeroize-on-drop +//! handle to a secret. This replaces the static `String` credential +//! fields that tools typically store for their entire process lifetime. + +use crate::credential::CredentialGuard; +use crate::error::ProviderError; + +/// A request for a credential, describing what the tool needs. +#[derive(Debug, Clone)] +pub struct CredentialRequest { + /// Which secret to access (e.g., "jira-api-token"). + pub secret_name: String, + /// The domain this credential will be used against (e.g., + /// "mycompany.atlassian.net"). + pub target_domain: String, + /// Identity of the requesting agent. + pub agent_id: String, +} + +/// A provider that acquires credentials from a vault on behalf of tools. +/// +/// Implementations handle the lease lifecycle: request a lease, access +/// the secret, and arrange for revocation when the guard is dropped. +#[async_trait::async_trait] +pub trait CredentialProvider: Send + Sync { + /// Acquire a credential for the given request. + /// + /// Returns a [`CredentialGuard`] that: + /// - Provides the secret value via `expose()` + /// - Zeroizes the secret from memory on drop + /// - Revokes the underlying lease on drop + async fn acquire(&self, request: CredentialRequest) -> Result; +} diff --git a/crates/zerolease-provider/src/static_provider.rs b/crates/zerolease-provider/src/static_provider.rs new file mode 100644 index 0000000..977b473 --- /dev/null +++ b/crates/zerolease-provider/src/static_provider.rs @@ -0,0 +1,63 @@ +//! A static, in-memory `CredentialProvider` for testing and migration. +//! +//! `StaticProvider` holds credentials in a `HashMap` and returns them +//! without contacting a vault. Guards created by this provider have no +//! revocation channel — drop is a no-op beyond zeroization. +//! +//! This serves two purposes: +//! 
- Testing tools without a running vault +//! - Representing the "current behavior" (static credentials) behind the +//! `CredentialProvider` trait during incremental migration + +use std::collections::HashMap; + +use secrecy::SecretString; + +use crate::credential::CredentialGuard; +use crate::error::ProviderError; +use crate::provider::{CredentialProvider, CredentialRequest}; + +/// An in-memory credential provider backed by a `HashMap`. +/// +/// Credentials are keyed by `secret_name`. Domain and agent checks +/// are not enforced (this is a test/migration helper, not a vault). +pub struct StaticProvider { + credentials: HashMap, +} + +impl StaticProvider { + /// Create an empty provider. + pub fn new() -> Self { + Self { + credentials: HashMap::new(), + } + } + + /// Insert a credential. Overwrites any existing value for the key. + pub fn insert(&mut self, secret_name: impl Into, value: impl Into) { + self.credentials + .insert(secret_name.into(), SecretString::from(value.into())); + } +} + +impl Default for StaticProvider { + fn default() -> Self { + Self::new() + } +} + +#[async_trait::async_trait] +impl CredentialProvider for StaticProvider { + async fn acquire(&self, request: CredentialRequest) -> Result { + let secret = self + .credentials + .get(&request.secret_name) + .ok_or_else(|| ProviderError::Unavailable(format!("no such secret: {}", request.secret_name)))?; + + // Clone the secret value into a new guard. StaticProvider guards + // have no revocation channel — drop just zeroizes. + use secrecy::ExposeSecret; + let cloned = SecretString::from(secret.expose_secret().to_string()); + Ok(CredentialGuard::new_static(cloned, request.target_domain)) + } +} diff --git a/crates/zerolease-provider/src/zerolease_provider.rs b/crates/zerolease-provider/src/zerolease_provider.rs new file mode 100644 index 0000000..5d5ac52 --- /dev/null +++ b/crates/zerolease-provider/src/zerolease_provider.rs @@ -0,0 +1,97 @@ +//! 
`CredentialProvider` implementation backed by a zerolease vault server. +//! +//! Connects to a vault over UDS, requests leases, and wraps secrets +//! in `CredentialGuard`s that revoke on drop via a background task. + +use std::path::{Path, PathBuf}; + +use secrecy::SecretString; +use tokio::sync::mpsc; +use uuid::Uuid; +use zerolease::audit::RevocationReason; +use zerolease::client::VaultClient; +use zerolease::transport::uds::UdsConnector; + +use crate::credential::CredentialGuard; +use crate::error::ProviderError; +use crate::provider::{CredentialProvider, CredentialRequest}; + +/// A `CredentialProvider` that acquires credentials from a zerolease +/// vault server over a Unix domain socket. +/// +/// Each `acquire()` call opens a fresh connection to the vault (fine +/// for a spike; production code would pool connections). A background +/// tokio task handles lease revocation when guards are dropped. +pub struct ZeroleaseProvider { + socket_path: PathBuf, + revoke_tx: mpsc::Sender, +} + +impl ZeroleaseProvider { + /// Create a new provider connected to a vault at the given socket path. + /// + /// Spawns a background task that revokes leases as `CredentialGuard`s + /// are dropped. 
+ pub fn new(socket_path: impl AsRef) -> Self { + let socket_path = socket_path.as_ref().to_path_buf(); + let (revoke_tx, revoke_rx) = mpsc::channel::(64); + + let bg_socket = socket_path.clone(); + tokio::spawn(revocation_worker(bg_socket, revoke_rx)); + + Self { socket_path, revoke_tx } + } +} + +#[async_trait::async_trait] +impl CredentialProvider for ZeroleaseProvider { + async fn acquire(&self, request: CredentialRequest) -> Result { + let connector = UdsConnector::new(&self.socket_path); + let mut client = VaultClient::connect(&connector) + .await + .map_err(|e| ProviderError::ConnectionFailed(e.to_string()))?; + + // Request a lease + let grant = client + .request_lease(&request.agent_id, &request.secret_name, &request.target_domain) + .await + .map_err(|e| ProviderError::Unavailable(e.to_string()))?; + + // Access the secret through the lease + let secret_bytes = client + .access_secret(*grant.lease_id.as_uuid(), &request.target_domain) + .await + .map_err(|e| ProviderError::Unavailable(e.to_string()))?; + + let secret_str = String::from_utf8(secret_bytes).map_err(|_| ProviderError::InvalidUtf8)?; + + Ok(CredentialGuard::new( + SecretString::from(secret_str), + *grant.lease_id.as_uuid(), + request.target_domain, + self.revoke_tx.clone(), + )) + } +} + +/// Background task that revokes leases as guards are dropped. +/// +/// Connects to the vault for each revocation. In production, this +/// would batch revocations or use a persistent connection. +async fn revocation_worker(socket_path: PathBuf, mut rx: mpsc::Receiver) { + while let Some(lease_id) = rx.recv().await { + let connector = UdsConnector::new(&socket_path); + match VaultClient::connect(&connector).await { + Ok(mut client) => { + if let Err(e) = client.revoke_lease(lease_id, RevocationReason::AdminRevoked).await { + // Best-effort: log and continue. The lease has a TTL + // and will expire on its own. 
+ eprintln!("warning: failed to revoke lease {lease_id}: {e}"); + } + } + Err(e) => { + eprintln!("warning: could not connect for lease revocation: {e}"); + } + } + } +} diff --git a/crates/zerolease-store-postgres/.gitignore b/crates/zerolease-store-postgres/.gitignore new file mode 100644 index 0000000..2f7896d --- /dev/null +++ b/crates/zerolease-store-postgres/.gitignore @@ -0,0 +1 @@ +target/ diff --git a/crates/zerolease-store-postgres/Cargo.toml b/crates/zerolease-store-postgres/Cargo.toml new file mode 100644 index 0000000..bc0740c --- /dev/null +++ b/crates/zerolease-store-postgres/Cargo.toml @@ -0,0 +1,33 @@ +# This crate is excluded from the main zerolease workspace due to +# sqlx v0.8's libsqlite3-sys conflict with rusqlite. Build/test separately. +[workspace] + +[package] +name = "zerolease-store-postgres" +version = "0.1.0" +description = "PostgreSQL-backed storage backends for zerolease (SecretStore + AuditLog)" +edition = "2024" +authors = ["C J Silverio "] +license = "Apache-2.0" +repository = "https://github.com/ceejbot/zerolease" + +[dependencies] +async-trait = "0.1" +chrono = { version = "0.4", features = ["serde"] } +serde_json = "1" +tokio = { version = "1", features = ["full"] } +uuid = { version = "1", features = ["v7", "serde"] } +zerolease = { path = "../..", default-features = false } + +[dependencies.sqlx] +version = "0.8" +features = ["runtime-tokio", "postgres", "chrono", "uuid"] + +[dev-dependencies] +tempfile = "3" + +[lints.rust] +unsafe_code = { level = "deny", priority = 0 } + +[lints.clippy] +unwrap_used = "deny" diff --git a/crates/zerolease-store-postgres/src/audit.rs b/crates/zerolease-store-postgres/src/audit.rs new file mode 100644 index 0000000..21ee6c0 --- /dev/null +++ b/crates/zerolease-store-postgres/src/audit.rs @@ -0,0 +1,208 @@ +//! PostgreSQL-backed audit log. 
+ +use chrono::{DateTime, Utc}; +use sqlx::postgres::PgPoolOptions; +use sqlx::{PgPool, Row}; +use uuid::Uuid; + +use zerolease::audit::{AuditEntry, AuditEvent, AuditLog, AuditOutcome}; +use zerolease::error::{Error, Result}; +use zerolease::types::{AgentId, LeaseId, SecretName}; + +/// A persistent audit log backed by PostgreSQL. +pub struct PostgresAuditLog { + pool: PgPool, +} + +impl PostgresAuditLog { + /// Connect to a PostgreSQL database at the given URL. + /// Creates the audit_events table and indexes if they don't exist. + pub async fn new(url: &str) -> Result { + let pool = PgPoolOptions::new() + .max_connections(5) + .connect(url) + .await + .map_err(|e| Error::Storage(format!("failed to open audit database: {e}")))?; + + sqlx::raw_sql(include_str!("../../../sql/postgres_audit_table.sql")) + .execute(&pool) + .await + .map_err(|e| Error::Storage(format!("failed to create audit schema: {e}")))?; + + Ok(Self { pool }) + } +} + + +fn row_to_audit_entry(row: &sqlx::postgres::PgRow) -> Result { + let event_id_str: String = row.try_get("event_id").map_err(|e| Error::Storage(e.to_string()))?; + let event_id = Uuid::parse_str(&event_id_str).map_err(|e| Error::Storage(format!("invalid event_id: {e}")))?; + + let timestamp: DateTime = row + .try_get::, _>("timestamp") + .map_err(|e| Error::Storage(e.to_string()))?; + + let event_str: String = row.try_get("event").map_err(|e| Error::Storage(e.to_string()))?; + let event: AuditEvent = + serde_json::from_str(&event_str).map_err(|e| Error::Storage(format!("invalid event JSON: {e}")))?; + + let agent_str: String = row.try_get("agent").map_err(|e| Error::Storage(e.to_string()))?; + let peer_identity: String = row.try_get("peer_identity").map_err(|e| Error::Storage(e.to_string()))?; + + let outcome_str: String = row.try_get("outcome").map_err(|e| Error::Storage(e.to_string()))?; + let outcome: AuditOutcome = + serde_json::from_str(&outcome_str).map_err(|e| Error::Storage(format!("invalid outcome JSON: {e}")))?; + + 
Ok(AuditEntry { + event_id, + timestamp, + event, + agent: AgentId::new(agent_str), + peer_identity, + outcome, + }) +} + +#[async_trait::async_trait] +impl AuditLog for PostgresAuditLog { + async fn record(&self, entry: AuditEntry) -> Result<()> { + let (secret_name, lease_id) = entry.event.indexed_fields(); + let event_id_str = entry.event_id.to_string(); + let event_str = serde_json::to_string(&entry.event) + .map_err(|e| Error::Storage(format!("failed to serialize event: {e}")))?; + let agent_str = entry.agent.as_str().to_string(); + let outcome_str = serde_json::to_string(&entry.outcome) + .map_err(|e| Error::Storage(format!("failed to serialize outcome: {e}")))?; + + sqlx::query( + "INSERT INTO audit_events (event_id, timestamp, event, agent, peer_identity, outcome, secret_name, lease_id) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8)", + ) + .bind(&event_id_str) + .bind(entry.timestamp) + .bind(&event_str) + .bind(&agent_str) + .bind(&entry.peer_identity) + .bind(&outcome_str) + .bind(&secret_name) + .bind(&lease_id) + .execute(&self.pool) + .await + .map_err(|e| Error::Storage(format!("failed to insert audit event: {e}")))?; + + Ok(()) + } + + async fn query_by_agent(&self, agent: &AgentId, limit: usize) -> Result> { + let rows = sqlx::query( + "SELECT * FROM audit_events WHERE agent = $1 ORDER BY timestamp DESC LIMIT $2", + ) + .bind(agent.as_str()) + .bind(limit as i64) + .fetch_all(&self.pool) + .await + .map_err(|e| Error::Storage(format!("query_by_agent failed: {e}")))?; + + rows.iter().map(row_to_audit_entry).collect() + } + + async fn query_by_secret(&self, secret: &SecretName, limit: usize) -> Result> { + let rows = sqlx::query( + "SELECT * FROM audit_events WHERE secret_name = $1 ORDER BY timestamp DESC LIMIT $2", + ) + .bind(secret.as_str()) + .bind(limit as i64) + .fetch_all(&self.pool) + .await + .map_err(|e| Error::Storage(format!("query_by_secret failed: {e}")))?; + + rows.iter().map(row_to_audit_entry).collect() + } + + async fn 
query_by_lease(&self, lease: &LeaseId) -> Result> { + let rows = sqlx::query( + "SELECT * FROM audit_events WHERE lease_id = $1 ORDER BY timestamp DESC", + ) + .bind(lease.as_uuid().to_string()) + .fetch_all(&self.pool) + .await + .map_err(|e| Error::Storage(format!("query_by_lease failed: {e}")))?; + + rows.iter().map(row_to_audit_entry).collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use zerolease::audit::AuditOutcome; + use zerolease::transport::PeerIdentity; + use zerolease::types::DomainScope; + + fn test_url() -> String { + std::env::var("DATABASE_URL") + .unwrap_or_else(|_| "postgres://localhost/zerolease_test".to_string()) + } + + async fn test_audit_log() -> PostgresAuditLog { + let log = PostgresAuditLog::new(&test_url()) + .await + .expect("should create audit log"); + sqlx::query("DELETE FROM audit_events") + .execute(&log.pool) + .await + .expect("should clean test data"); + log + } + + fn make_entry(agent: &str, event: AuditEvent) -> AuditEntry { + AuditEntry::new(event, AgentId::new(agent), &PeerIdentity::Anonymous, AuditOutcome::Success) + } + + #[tokio::test] + #[ignore] // requires running PostgreSQL with zerolease_test database + async fn record_and_query_by_agent() { + let log = test_audit_log().await; + for _ in 0..3 { + log.record(make_entry("alice", AuditEvent::DekRotated)).await.expect("record"); + } + log.record(make_entry("bob", AuditEvent::DekRotated)).await.expect("record"); + + let results = log.query_by_agent(&AgentId::new("alice"), 10).await.expect("query"); + assert_eq!(results.len(), 3); + + let bob = log.query_by_agent(&AgentId::new("bob"), 10).await.expect("query"); + assert_eq!(bob.len(), 1); + } + + #[tokio::test] + #[ignore] + async fn query_by_secret() { + let log = test_audit_log().await; + log.record(make_entry("agent", AuditEvent::LeaseGranted { + lease_id: LeaseId::new(), + secret_name: SecretName::new("pg-secret-a"), + domains: vec![DomainScope::new("api.example.com")], + ttl_seconds: 900, + 
})).await.expect("record"); + + let results = log.query_by_secret(&SecretName::new("pg-secret-a"), 10).await.expect("query"); + assert_eq!(results.len(), 1); + } + + #[tokio::test] + #[ignore] + async fn query_by_lease() { + let log = test_audit_log().await; + let lease = LeaseId::new(); + log.record(make_entry("agent", AuditEvent::LeaseGranted { + lease_id: lease, + secret_name: SecretName::new("pg-secret"), + domains: vec![DomainScope::new("example.com")], + ttl_seconds: 900, + })).await.expect("record"); + + let results = log.query_by_lease(&lease).await.expect("query"); + assert_eq!(results.len(), 1); + } +} diff --git a/crates/zerolease-store-postgres/src/lib.rs b/crates/zerolease-store-postgres/src/lib.rs new file mode 100644 index 0000000..d23cf7d --- /dev/null +++ b/crates/zerolease-store-postgres/src/lib.rs @@ -0,0 +1,15 @@ +//! PostgreSQL-backed storage backends for zerolease. +//! +//! Provides `PostgresStore` (implements `SecretStore`) and +//! `PostgresAuditLog` (implements `AuditLog`) using `sqlx` with +//! the `postgres` feature. +//! +//! Suitable for shared infrastructure where multiple vault instances +//! need a common store, or where you want to leverage existing +//! PostgreSQL infrastructure and backup tooling. + +mod audit; +mod store; + +pub use audit::PostgresAuditLog; +pub use store::PostgresStore; diff --git a/crates/zerolease-store-postgres/src/store.rs b/crates/zerolease-store-postgres/src/store.rs new file mode 100644 index 0000000..2275737 --- /dev/null +++ b/crates/zerolease-store-postgres/src/store.rs @@ -0,0 +1,353 @@ +//! PostgreSQL secret store backend. 
+use chrono::{DateTime, Utc};
+use sqlx::postgres::PgPoolOptions;
+use sqlx::{PgPool, Row};
+use uuid::Uuid;
+
+use zerolease::error::{Error, Result};
+use zerolease::store::{
+    BatchUpdateItem, CipherAlgorithm, SecretKind, SecretMetadata, SecretStore, StoreSecretParams,
+    StoredSecret,
+};
+use zerolease::types::{SecretId, SecretName};
+
+/// PostgreSQL-backed implementation of [`SecretStore`].
+///
+/// All values are opaque encrypted blobs — this layer never sees plaintext.
+pub struct PostgresStore {
+    pool: PgPool,
+}
+
+impl PostgresStore {
+    /// Connect to a PostgreSQL database at the given URL.
+    ///
+    /// Creates the secrets table if it does not already exist.
+    pub async fn new(url: &str) -> Result<Self> {
+        let pool = PgPoolOptions::new()
+            .max_connections(5)
+            .connect(url)
+            .await
+            .map_err(|e| Error::Storage(e.to_string()))?;
+
+        // Apply the schema with raw_sql, not query: query() prepares a single
+        // statement and fails on multi-statement schema scripts (table +
+        // indexes). This also matches how PostgresAuditLog::new applies its
+        // schema file.
+        sqlx::raw_sql(include_str!("../../../sql/postgres_secrets.sql"))
+            .execute(&pool)
+            .await
+            .map_err(|e| Error::Storage(format!("failed to create secrets schema: {e}")))?;
+
+        Ok(Self { pool })
+    }
+}
+
+#[async_trait::async_trait]
+impl SecretStore for PostgresStore {
+    async fn put(&self, params: StoreSecretParams) -> Result<StoredSecret> {
+        let id = SecretId::new();
+        let now = Utc::now();
+        let id_str = id.as_uuid().to_string();
+        let name_str = params.name.as_str().to_string();
+        let algorithm_str = params.algorithm.to_db_string();
+        let kind_str = params.kind.to_db_string();
+
+        sqlx::query(
+            "INSERT INTO secrets (id, name, ciphertext, nonce, algorithm, kind, description, created_at, updated_at, version)
+             VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, 1)",
+        )
+        .bind(&id_str)
+        .bind(&name_str)
+        .bind(&params.ciphertext)
+        .bind(&params.nonce)
+        .bind(&algorithm_str)
+        .bind(&kind_str)
+        .bind(&params.description)
+        .bind(now)
+        .bind(now)
+        .execute(&self.pool)
+        .await
+        .map_err(|e| {
+            // Postgres SQLSTATE 23505 = unique_violation on the name column.
+            if let sqlx::Error::Database(ref db_err) = e
+                && db_err.code().map(|c| c == "23505").unwrap_or(false)
+            {
+                return Error::SecretAlreadyExists(params.name.clone());
+            }
+
Error::Storage(format!("insert failed: {e}")) + })?; + + Ok(StoredSecret { + id, + name: params.name, + ciphertext: params.ciphertext, + nonce: params.nonce, + algorithm: params.algorithm, + kind: params.kind, + description: params.description, + created_at: now, + updated_at: now, + version: 1, + }) + } + + async fn get(&self, name: &SecretName) -> Result { + let name_str = name.as_str().to_string(); + let row = sqlx::query( + "SELECT id, name, ciphertext, nonce, algorithm, kind, description, created_at, updated_at, version + FROM secrets WHERE name = $1", + ) + .bind(&name_str) + .fetch_optional(&self.pool) + .await + .map_err(|e| Error::Storage(format!("query failed: {e}")))? + .ok_or_else(|| Error::SecretNotFound(name.clone()))?; + + row_to_stored_secret(&row) + } + + async fn update( + &self, + name: &SecretName, + ciphertext: Vec, + nonce: Vec, + algorithm: CipherAlgorithm, + ) -> Result { + let name_str = name.as_str().to_string(); + let now = Utc::now(); + let algorithm_str = algorithm.to_db_string(); + + let result = sqlx::query( + "UPDATE secrets SET ciphertext = $1, nonce = $2, algorithm = $3, updated_at = $4, version = version + 1 + WHERE name = $5", + ) + .bind(&ciphertext) + .bind(&nonce) + .bind(&algorithm_str) + .bind(now) + .bind(&name_str) + .execute(&self.pool) + .await + .map_err(|e| Error::Storage(format!("update failed: {e}")))?; + + if result.rows_affected() == 0 { + return Err(Error::SecretNotFound(name.clone())); + } + + self.get(name).await + } + + async fn batch_update(&self, updates: Vec) -> Result<()> { + let mut tx = self + .pool + .begin() + .await + .map_err(|e| Error::Storage(format!("failed to begin transaction: {e}")))?; + + for item in &updates { + let name_str = item.name.as_str().to_string(); + let now = Utc::now(); + let algorithm_str = item.algorithm.to_db_string(); + + let result = sqlx::query( + "UPDATE secrets SET ciphertext = $1, nonce = $2, algorithm = $3, updated_at = $4, version = version + 1 WHERE name = $5", + ) + 
.bind(&item.ciphertext) + .bind(&item.nonce) + .bind(&algorithm_str) + .bind(now) + .bind(&name_str) + .execute(&mut *tx) + .await + .map_err(|e| Error::Storage(format!("batch update failed: {e}")))?; + + if result.rows_affected() == 0 { + return Err(Error::SecretNotFound(item.name.clone())); + } + } + + tx.commit() + .await + .map_err(|e| Error::Storage(format!("transaction commit failed: {e}")))?; + + Ok(()) + } + + async fn delete(&self, name: &SecretName) -> Result<()> { + let name_str = name.as_str().to_string(); + let result = sqlx::query("DELETE FROM secrets WHERE name = $1") + .bind(&name_str) + .execute(&self.pool) + .await + .map_err(|e| Error::Storage(format!("delete failed: {e}")))?; + + if result.rows_affected() == 0 { + return Err(Error::SecretNotFound(name.clone())); + } + Ok(()) + } + + async fn list(&self) -> Result> { + let rows = sqlx::query( + "SELECT id, name, kind, description, created_at, updated_at, version FROM secrets", + ) + .fetch_all(&self.pool) + .await + .map_err(|e| Error::Storage(format!("list query failed: {e}")))?; + + let mut result = Vec::with_capacity(rows.len()); + for row in &rows { + let id_str: String = row.try_get("id").map_err(|e| Error::Storage(e.to_string()))?; + let id_uuid = + Uuid::parse_str(&id_str).map_err(|e| Error::Storage(e.to_string()))?; + let name_str: String = + row.try_get("name").map_err(|e| Error::Storage(e.to_string()))?; + let kind_str: String = + row.try_get("kind").map_err(|e| Error::Storage(e.to_string()))?; + let description: Option = + row.try_get("description").map_err(|e| Error::Storage(e.to_string()))?; + let created_at: DateTime = row + .try_get::, _>("created_at") + .map_err(|e| Error::Storage(e.to_string()))?; + let updated_at: DateTime = row + .try_get::, _>("updated_at") + .map_err(|e| Error::Storage(e.to_string()))?; + let version: i32 = + row.try_get("version").map_err(|e| Error::Storage(e.to_string()))?; + + result.push(SecretMetadata { + id: SecretId::from_uuid(id_uuid), + name: 
SecretName::new(name_str), + kind: SecretKind::from_db_string(&kind_str)?, + description, + created_at, + updated_at, + version: version as u32, + }); + } + Ok(result) + } +} + + +fn row_to_stored_secret(row: &sqlx::postgres::PgRow) -> Result { + let id_str: String = row.try_get("id").map_err(|e| Error::Storage(e.to_string()))?; + let id_uuid = Uuid::parse_str(&id_str).map_err(|e| Error::Storage(e.to_string()))?; + let name_str: String = row.try_get("name").map_err(|e| Error::Storage(e.to_string()))?; + let algorithm_str: String = + row.try_get("algorithm").map_err(|e| Error::Storage(e.to_string()))?; + let kind_str: String = row.try_get("kind").map_err(|e| Error::Storage(e.to_string()))?; + let description: Option = + row.try_get("description").map_err(|e| Error::Storage(e.to_string()))?; + let created_at: DateTime = row + .try_get::, _>("created_at") + .map_err(|e| Error::Storage(e.to_string()))?; + let updated_at: DateTime = row + .try_get::, _>("updated_at") + .map_err(|e| Error::Storage(e.to_string()))?; + let version: i32 = row.try_get("version").map_err(|e| Error::Storage(e.to_string()))?; + let ciphertext: Vec = + row.try_get("ciphertext").map_err(|e| Error::Storage(e.to_string()))?; + let nonce: Vec = row.try_get("nonce").map_err(|e| Error::Storage(e.to_string()))?; + + Ok(StoredSecret { + id: SecretId::from_uuid(id_uuid), + name: SecretName::new(name_str), + ciphertext, + nonce, + algorithm: CipherAlgorithm::from_db_string(&algorithm_str)?, + kind: SecretKind::from_db_string(&kind_str)?, + description, + created_at, + updated_at, + version: version as u32, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use zerolease::store::CipherAlgorithm; + + fn test_url() -> String { + std::env::var("DATABASE_URL") + .unwrap_or_else(|_| "postgres://localhost/zerolease_test".to_string()) + } + + async fn test_store() -> PostgresStore { + let store = PostgresStore::new(&test_url()) + .await + .expect("should create store"); + sqlx::query("DELETE FROM secrets") + 
.execute(&store.pool) + .await + .expect("should clean test data"); + store + } + + fn test_params(name: &str) -> StoreSecretParams { + StoreSecretParams { + name: SecretName::new(name), + ciphertext: vec![1, 2, 3, 4], + nonce: vec![5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + algorithm: CipherAlgorithm::Aes256Gcm, + kind: zerolease::store::SecretKind::Pat, + description: Some("test secret".into()), + } + } + + #[tokio::test] + #[ignore] // requires running PostgreSQL with zerolease_test database + async fn put_and_get_round_trip() { + let store = test_store().await; + let stored = store.put(test_params("pg-secret")).await.expect("put"); + assert_eq!(stored.name, SecretName::new("pg-secret")); + assert_eq!(stored.version, 1); + let fetched = store.get(&SecretName::new("pg-secret")).await.expect("get"); + assert_eq!(fetched.ciphertext, stored.ciphertext); + } + + #[tokio::test] + #[ignore] + async fn put_duplicate_name_errors() { + let store = test_store().await; + store.put(test_params("pg-dup")).await.expect("first put"); + let err = store.put(test_params("pg-dup")).await.expect_err("dup").to_string(); + assert!(err.contains("already exists"), "error was: {err}"); + } + + #[tokio::test] + #[ignore] + async fn get_missing_errors() { + let store = test_store().await; + let err = store.get(&SecretName::new("pg-nope")).await.expect_err("missing").to_string(); + assert!(err.contains("not found"), "error was: {err}"); + } + + #[tokio::test] + #[ignore] + async fn update_increments_version() { + let store = test_store().await; + store.put(test_params("pg-versioned")).await.expect("put"); + let updated = store + .update(&SecretName::new("pg-versioned"), vec![10, 20], vec![1; 12], CipherAlgorithm::Aes256Gcm) + .await + .expect("update"); + assert_eq!(updated.version, 2); + } + + #[tokio::test] + #[ignore] + async fn delete_removes_secret() { + let store = test_store().await; + store.put(test_params("pg-doomed")).await.expect("put"); + 
store.delete(&SecretName::new("pg-doomed")).await.expect("delete"); + assert!(store.get(&SecretName::new("pg-doomed")).await.is_err()); + } + + #[tokio::test] + #[ignore] + async fn list_returns_metadata() { + let store = test_store().await; + store.put(test_params("pg-first")).await.expect("put"); + store.put(test_params("pg-second")).await.expect("put"); + let list = store.list().await.expect("list"); + assert_eq!(list.len(), 2); + } +} diff --git a/crates/zerolease-store-rusqlite/Cargo.toml b/crates/zerolease-store-rusqlite/Cargo.toml new file mode 100644 index 0000000..83ce0a4 --- /dev/null +++ b/crates/zerolease-store-rusqlite/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "zerolease-store-rusqlite" +version = "0.1.0" +description = "Rusqlite-backed storage backends for zerolease (SecretStore + AuditLog)" +edition.workspace = true +authors.workspace = true +license.workspace = true +repository.workspace = true + +[dependencies] +async-trait.workspace = true +chrono.workspace = true +rusqlite = { version = "0.37", features = ["bundled"] } +serde_json.workspace = true +tokio.workspace = true +uuid.workspace = true +zerolease.workspace = true + +[dev-dependencies] +tempfile = "3" + +[lints] +workspace = true diff --git a/crates/zerolease-store-rusqlite/src/audit.rs b/crates/zerolease-store-rusqlite/src/audit.rs new file mode 100644 index 0000000..a944a71 --- /dev/null +++ b/crates/zerolease-store-rusqlite/src/audit.rs @@ -0,0 +1,271 @@ +//! SQLite-backed audit log using `rusqlite`. + +use std::path::Path; +use std::sync::Arc; + +use chrono::Utc; +use rusqlite::Connection; +use tokio::sync::Mutex; +use uuid::Uuid; +use zerolease::audit::{AuditEntry, AuditEvent, AuditLog, AuditOutcome}; +use zerolease::error::{Error, Result}; +use zerolease::types::{AgentId, LeaseId, SecretName}; + +/// A persistent audit log backed by SQLite via `rusqlite`. 
+pub struct RusqliteAuditLog {
+    conn: Arc<Mutex<Connection>>,
+}
+
+impl RusqliteAuditLog {
+    /// Create a new SQLite audit log at the given path.
+    /// Uses WAL mode for concurrent read/write performance.
+    pub async fn new(path: impl AsRef<Path>) -> Result<Self> {
+        let path = path.as_ref().to_path_buf();
+        let conn = tokio::task::spawn_blocking(move || {
+            let conn =
+                Connection::open(&path).map_err(|e| Error::Storage(format!("failed to open audit database: {e}")))?;
+
+            conn.pragma_update(None, "journal_mode", "WAL")
+                .map_err(|e| Error::Storage(format!("failed to set WAL mode: {e}")))?;
+
+            conn.execute_batch(include_str!("../../../sql/sqlite_audit_table.sql"))
+                .map_err(|e| Error::Storage(format!("failed to create audit_events table: {e}")))?;
+
+            for idx in [
+                "CREATE INDEX IF NOT EXISTS idx_audit_agent ON audit_events(agent)",
+                "CREATE INDEX IF NOT EXISTS idx_audit_secret ON audit_events(secret_name)",
+                "CREATE INDEX IF NOT EXISTS idx_audit_lease ON audit_events(lease_id)",
+                "CREATE INDEX IF NOT EXISTS idx_audit_timestamp ON audit_events(timestamp)",
+            ] {
+                conn.execute(idx, [])
+                    .map_err(|e| Error::Storage(format!("failed to create index: {e}")))?;
+            }
+
+            Ok::<_, Error>(conn)
+        })
+        .await
+        .map_err(|e| Error::Storage(format!("spawn_blocking failed: {e}")))??;
+
+        Ok(Self {
+            conn: Arc::new(Mutex::new(conn)),
+        })
+    }
+}
+
+fn row_to_audit_entry(row: &rusqlite::Row<'_>) -> Result<AuditEntry> {
+    let event_id_str: String = row.get("event_id").map_err(|e| Error::Storage(e.to_string()))?;
+    let event_id = Uuid::parse_str(&event_id_str).map_err(|e| Error::Storage(format!("invalid event_id: {e}")))?;
+
+    let timestamp_str: String = row.get("timestamp").map_err(|e| Error::Storage(e.to_string()))?;
+    let timestamp = chrono::DateTime::parse_from_rfc3339(&timestamp_str)
+        .map_err(|e| Error::Storage(format!("invalid timestamp: {e}")))?
+        .with_timezone(&Utc);
+
+    let event_str: String = row.get("event").map_err(|e| Error::Storage(e.to_string()))?;
+    let event: AuditEvent =
+        serde_json::from_str(&event_str).map_err(|e| Error::Storage(format!("invalid event JSON: {e}")))?;
+
+    let agent_str: String = row.get("agent").map_err(|e| Error::Storage(e.to_string()))?;
+    let peer_identity: String = row.get("peer_identity").map_err(|e| Error::Storage(e.to_string()))?;
+
+    let outcome_str: String = row.get("outcome").map_err(|e| Error::Storage(e.to_string()))?;
+    let outcome: AuditOutcome =
+        serde_json::from_str(&outcome_str).map_err(|e| Error::Storage(format!("invalid outcome JSON: {e}")))?;
+
+    Ok(AuditEntry {
+        event_id,
+        timestamp,
+        event,
+        agent: AgentId::new(agent_str),
+        peer_identity,
+        outcome,
+    })
+}
+
+fn query_rows(conn: &Connection, sql: &str, params: &[&dyn rusqlite::types::ToSql]) -> Result<Vec<AuditEntry>> {
+    let mut stmt = conn
+        .prepare(sql)
+        .map_err(|e| Error::Storage(format!("prepare failed: {e}")))?;
+    let rows = stmt
+        .query_map(params, |row| {
+            row_to_audit_entry(row)
+                .map_err(|e| rusqlite::Error::FromSqlConversionFailure(0, rusqlite::types::Type::Text, Box::new(e)))
+        })
+        .map_err(|e| Error::Storage(format!("query failed: {e}")))?;
+
+    let mut result = Vec::new();
+    for row in rows {
+        result.push(row.map_err(|e| Error::Storage(format!("row extraction failed: {e}")))?);
+    }
+    Ok(result)
+}
+
+#[async_trait::async_trait]
+impl AuditLog for RusqliteAuditLog {
+    async fn record(&self, entry: AuditEntry) -> Result<()> {
+        let (secret_name, lease_id) = entry.event.indexed_fields();
+        let event_id_str = entry.event_id.to_string();
+        let timestamp_str = entry.timestamp.to_rfc3339();
+        let event_str = serde_json::to_string(&entry.event)
+            .map_err(|e| Error::Storage(format!("failed to serialize event: {e}")))?;
+        let agent_str = entry.agent.as_str().to_string();
+        let outcome_str = serde_json::to_string(&entry.outcome)
+            .map_err(|e| Error::Storage(format!("failed to serialize outcome: {e}")))?;
+        let peer = entry.peer_identity;
+        let conn = Arc::clone(&self.conn);
+
+        tokio::task::spawn_blocking(move || {
+            let conn = conn.blocking_lock();
+            conn.execute(
+                "INSERT INTO audit_events (event_id, timestamp, event, agent, peer_identity, outcome, secret_name, lease_id)
+                 VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
+                rusqlite::params![event_id_str, timestamp_str, event_str, agent_str, peer, outcome_str, secret_name, lease_id],
+            )
+            .map_err(|e| Error::Storage(format!("failed to insert audit event: {e}")))?;
+            Ok(())
+        })
+        .await
+        .map_err(|e| Error::Storage(format!("spawn_blocking failed: {e}")))?
+    }
+
+    async fn query_by_agent(&self, agent: &AgentId, limit: usize) -> Result<Vec<AuditEntry>> {
+        let conn = Arc::clone(&self.conn);
+        let agent_str = agent.as_str().to_string();
+        let limit = limit as i64;
+
+        tokio::task::spawn_blocking(move || {
+            let conn = conn.blocking_lock();
+            query_rows(
+                &conn,
+                "SELECT * FROM audit_events WHERE agent = ?1 ORDER BY timestamp DESC LIMIT ?2",
+                rusqlite::params![agent_str, limit],
+            )
+        })
+        .await
+        .map_err(|e| Error::Storage(format!("spawn_blocking failed: {e}")))?
+    }
+
+    async fn query_by_secret(&self, secret: &SecretName, limit: usize) -> Result<Vec<AuditEntry>> {
+        let conn = Arc::clone(&self.conn);
+        let secret_str = secret.as_str().to_string();
+        let limit = limit as i64;
+
+        tokio::task::spawn_blocking(move || {
+            let conn = conn.blocking_lock();
+            query_rows(
+                &conn,
+                "SELECT * FROM audit_events WHERE secret_name = ?1 ORDER BY timestamp DESC LIMIT ?2",
+                rusqlite::params![secret_str, limit],
+            )
+        })
+        .await
+        .map_err(|e| Error::Storage(format!("spawn_blocking failed: {e}")))?
+    }
+
+    async fn query_by_lease(&self, lease: &LeaseId) -> Result<Vec<AuditEntry>> {
+        let conn = Arc::clone(&self.conn);
+        let lease_str = lease.as_uuid().to_string();
+
+        tokio::task::spawn_blocking(move || {
+            let conn = conn.blocking_lock();
+            query_rows(
+                &conn,
+                "SELECT * FROM audit_events WHERE lease_id = ?1 ORDER BY timestamp DESC",
+                rusqlite::params![lease_str],
+            )
+        })
+        .await
+        .map_err(|e| Error::Storage(format!("spawn_blocking failed: {e}")))?
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use tempfile::NamedTempFile;
+    use zerolease::audit::AuditOutcome;
+    use zerolease::transport::PeerIdentity;
+    use zerolease::types::DomainScope;
+
+    use super::*;
+
+    async fn test_audit_log() -> (RusqliteAuditLog, NamedTempFile) {
+        let tmp = NamedTempFile::new().expect("should create temp file");
+        let log = RusqliteAuditLog::new(tmp.path())
+            .await
+            .expect("should create audit log");
+        (log, tmp)
+    }
+
+    fn make_entry(agent: &str, event: AuditEvent) -> AuditEntry {
+        AuditEntry::new(
+            event,
+            AgentId::new(agent),
+            &PeerIdentity::Anonymous,
+            AuditOutcome::Success,
+        )
+    }
+
+    #[tokio::test]
+    async fn record_and_query_by_agent() {
+        let (log, _tmp) = test_audit_log().await;
+        for _ in 0..3 {
+            log.record(make_entry("alice", AuditEvent::DekRotated))
+                .await
+                .expect("record");
+        }
+        log.record(make_entry("bob", AuditEvent::DekRotated))
+            .await
+            .expect("record");
+
+        let results = log.query_by_agent(&AgentId::new("alice"), 10).await.expect("query");
+        assert_eq!(results.len(), 3);
+
+        let bob = log.query_by_agent(&AgentId::new("bob"), 10).await.expect("query");
+        assert_eq!(bob.len(), 1);
+    }
+
+    #[tokio::test]
+    async fn query_by_secret() {
+        let (log, _tmp) = test_audit_log().await;
+        log.record(make_entry(
+            "agent",
+            AuditEvent::LeaseGranted {
+                lease_id: LeaseId::new(),
+                secret_name: SecretName::new("secret-a"),
+                domains: vec![DomainScope::new("api.example.com")],
+                ttl_seconds: 900,
+            },
+        ))
+        .await
+        .expect("record");
+
+        let results = log
+            
.query_by_secret(&SecretName::new("secret-a"), 10) + .await + .expect("query"); + assert_eq!(results.len(), 1); + + let empty = log.query_by_secret(&SecretName::new("nope"), 10).await.expect("query"); + assert!(empty.is_empty()); + } + + #[tokio::test] + async fn query_by_lease() { + let (log, _tmp) = test_audit_log().await; + let lease = LeaseId::new(); + log.record(make_entry( + "agent", + AuditEvent::LeaseGranted { + lease_id: lease, + secret_name: SecretName::new("secret"), + domains: vec![DomainScope::new("example.com")], + ttl_seconds: 900, + }, + )) + .await + .expect("record"); + + let results = log.query_by_lease(&lease).await.expect("query"); + assert_eq!(results.len(), 1); + } +} diff --git a/crates/zerolease-store-rusqlite/src/lib.rs b/crates/zerolease-store-rusqlite/src/lib.rs new file mode 100644 index 0000000..5d146a3 --- /dev/null +++ b/crates/zerolease-store-rusqlite/src/lib.rs @@ -0,0 +1,14 @@ +//! Rusqlite-backed storage backends for zerolease. +//! +//! Provides `RusqliteStore` (implements `SecretStore`) and +//! `RusqliteAuditLog` (implements `AuditLog`) using `rusqlite`. +//! +//! Use this crate instead of `zerolease-store-sqlx` when your +//! application already depends on `rusqlite` (e.g., zeroclaw), +//! avoiding `libsqlite3-sys` link conflicts. + +mod audit; +mod store; + +pub use audit::RusqliteAuditLog; +pub use store::RusqliteStore; diff --git a/crates/zerolease-store-rusqlite/src/store.rs b/crates/zerolease-store-rusqlite/src/store.rs new file mode 100644 index 0000000..f6d294c --- /dev/null +++ b/crates/zerolease-store-rusqlite/src/store.rs @@ -0,0 +1,404 @@ +//! SQLite secret store backend using `rusqlite`. 
+
+use std::path::Path;
+use std::sync::Arc;
+
+use chrono::Utc;
+use rusqlite::Connection;
+use tokio::sync::Mutex;
+use uuid::Uuid;
+use zerolease::error::{Error, Result};
+use zerolease::store::{
+    BatchUpdateItem, CipherAlgorithm, SecretKind, SecretMetadata, SecretStore, StoreSecretParams, StoredSecret,
+};
+use zerolease::types::{SecretId, SecretName};
+
+/// SQLite-backed implementation of [`SecretStore`] using `rusqlite`.
+///
+/// Functionally identical to the `sqlx`-based `SqliteStore` but uses
+/// `rusqlite` to share `libsqlite3-sys` with downstream crates.
+/// Blocking calls are run via `tokio::task::spawn_blocking`.
+pub struct RusqliteStore {
+    conn: Arc<Mutex<Connection>>,
+}
+
+impl RusqliteStore {
+    /// Open (or create) a SQLite store at the given path.
+    ///
+    /// Creates the database file and schema if they do not already exist.
+    pub async fn new(path: impl AsRef<Path>) -> Result<Self> {
+        let path = path.as_ref().to_path_buf();
+        let conn = tokio::task::spawn_blocking(move || {
+            let conn = Connection::open(&path).map_err(|e| Error::Storage(format!("failed to open database: {e}")))?;
+            conn.execute_batch(include_str!("../../../sql/sqlite_secrets.sql"))
+                .map_err(|e| Error::Storage(format!("failed to create schema: {e}")))?;
+            Ok::<_, Error>(conn)
+        })
+        .await
+        .map_err(|e| Error::Storage(format!("spawn_blocking failed: {e}")))??;
+
+        Ok(Self {
+            conn: Arc::new(Mutex::new(conn)),
+        })
+    }
+}
+
+#[async_trait::async_trait]
+impl SecretStore for RusqliteStore {
+    async fn put(&self, params: StoreSecretParams) -> Result<StoredSecret> {
+        let id = SecretId::new();
+        let now = Utc::now();
+        let conn = Arc::clone(&self.conn);
+
+        let id_str = id.as_uuid().to_string();
+        let name_str = params.name.as_str().to_string();
+        let algorithm_str = params.algorithm.to_db_string();
+        let kind_str = params.kind.to_db_string();
+        let now_str = now.to_rfc3339();
+        let ciphertext = params.ciphertext.clone();
+        let nonce = params.nonce.clone();
+        let description = params.description.clone();
+        let name_for_err = params.name.clone();
+
+        tokio::task::spawn_blocking(move || {
+            let conn = conn.blocking_lock();
+            conn.execute(
+                "INSERT INTO secrets (id, name, ciphertext, nonce, algorithm, kind, description, created_at, updated_at, version)
+                 VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, 1)",
+                rusqlite::params![
+                    id_str, name_str, ciphertext, nonce, algorithm_str, kind_str,
+                    description, now_str, now_str,
+                ],
+            )
+            .map_err(|e| {
+                if let rusqlite::Error::SqliteFailure(ref err, _) = e && err.code == rusqlite::ErrorCode::ConstraintViolation {
+                    return Error::SecretAlreadyExists(name_for_err);
+                }
+                Error::Storage(format!("insert failed: {e}"))
+            })
+        })
+        .await
+        .map_err(|e| Error::Storage(format!("spawn_blocking failed: {e}")))??;
+
+        Ok(StoredSecret {
+            id,
+            name: params.name,
+            ciphertext: params.ciphertext,
+            nonce: params.nonce,
+            algorithm: params.algorithm,
+            kind: params.kind,
+            description: params.description,
+            created_at: now,
+            updated_at: now,
+            version: 1,
+        })
+    }
+
+    async fn get(&self, name: &SecretName) -> Result<StoredSecret> {
+        let conn = Arc::clone(&self.conn);
+        let name_str = name.as_str().to_string();
+        let name_clone = name.clone();
+
+        tokio::task::spawn_blocking(move || {
+            let conn = conn.blocking_lock();
+            let mut stmt = conn
+                .prepare(
+                    "SELECT id, name, ciphertext, nonce, algorithm, kind, description, created_at, updated_at, version
+                     FROM secrets WHERE name = ?1",
+                )
+                .map_err(|e| Error::Storage(format!("prepare failed: {e}")))?;
+
+            stmt.query_row(rusqlite::params![name_str], |row| {
+                row_to_stored_secret(row)
+                    .map_err(|e| rusqlite::Error::FromSqlConversionFailure(0, rusqlite::types::Type::Text, Box::new(e)))
+            })
+            .map_err(|e| match e {
+                rusqlite::Error::QueryReturnedNoRows => Error::SecretNotFound(name_clone),
+                _ => Error::Storage(format!("query failed: {e}")),
+            })
+        })
+        .await
+        .map_err(|e| Error::Storage(format!("spawn_blocking failed: {e}")))?
+    }
+
+    async fn update(
+        &self,
+        name: &SecretName,
+        ciphertext: Vec<u8>,
+        nonce: Vec<u8>,
+        algorithm: CipherAlgorithm,
+    ) -> Result<StoredSecret> {
+        let conn = Arc::clone(&self.conn);
+        let name_str = name.as_str().to_string();
+        let now_str = Utc::now().to_rfc3339();
+        let algorithm_str = algorithm.to_db_string();
+        let name_clone = name.clone();
+
+        tokio::task::spawn_blocking(move || {
+            let conn = conn.blocking_lock();
+            let rows = conn
+                .execute(
+                    "UPDATE secrets SET ciphertext = ?1, nonce = ?2, algorithm = ?3, updated_at = ?4, version = version + 1
+                     WHERE name = ?5",
+                    rusqlite::params![ciphertext, nonce, algorithm_str, now_str, name_str],
+                )
+                .map_err(|e| Error::Storage(format!("update failed: {e}")))?;
+
+            if rows == 0 {
+                return Err(Error::SecretNotFound(name_clone.clone()));
+            }
+
+            // Re-fetch in the same blocking context to avoid a second mutex acquisition.
+            let mut stmt = conn
+                .prepare(
+                    "SELECT id, name, ciphertext, nonce, algorithm, kind, description, created_at, updated_at, version
+                     FROM secrets WHERE name = ?1",
+                )
+                .map_err(|e| Error::Storage(format!("prepare failed: {e}")))?;
+
+            stmt.query_row(rusqlite::params![name_clone.as_str()], |row| {
+                row_to_stored_secret(row)
+                    .map_err(|e| rusqlite::Error::FromSqlConversionFailure(0, rusqlite::types::Type::Text, Box::new(e)))
+            })
+            .map_err(|e| Error::Storage(format!("query failed: {e}")))
+        })
+        .await
+        .map_err(|e| Error::Storage(format!("spawn_blocking failed: {e}")))?
+    }
+
+    async fn batch_update(&self, updates: Vec<BatchUpdateItem>) -> Result<()> {
+        let conn = Arc::clone(&self.conn);
+
+        tokio::task::spawn_blocking(move || {
+            let mut conn = conn.blocking_lock();
+            let tx = conn
+                .transaction()
+                .map_err(|e| Error::Storage(format!("failed to begin transaction: {e}")))?;
+
+            for item in &updates {
+                let name_str = item.name.as_str().to_string();
+                let now_str = Utc::now().to_rfc3339();
+                let algorithm_str = item.algorithm.to_db_string();
+
+                let rows = tx
+                    .execute(
+                        "UPDATE secrets SET ciphertext = ?1, nonce = ?2, algorithm = ?3, updated_at = ?4, version = version + 1
+                         WHERE name = ?5",
+                        rusqlite::params![item.ciphertext, item.nonce, algorithm_str, now_str, name_str],
+                    )
+                    .map_err(|e| Error::Storage(format!("batch update failed: {e}")))?;
+
+                if rows == 0 {
+                    return Err(Error::SecretNotFound(item.name.clone()));
+                }
+            }
+
+            tx.commit()
+                .map_err(|e| Error::Storage(format!("transaction commit failed: {e}")))?;
+            Ok(())
+        })
+        .await
+        .map_err(|e| Error::Storage(format!("spawn_blocking failed: {e}")))?
+    }
+
+    async fn delete(&self, name: &SecretName) -> Result<()> {
+        let conn = Arc::clone(&self.conn);
+        let name_str = name.as_str().to_string();
+        let name_clone = name.clone();
+
+        tokio::task::spawn_blocking(move || {
+            let conn = conn.blocking_lock();
+            let rows = conn
+                .execute("DELETE FROM secrets WHERE name = ?1", rusqlite::params![name_str])
+                .map_err(|e| Error::Storage(format!("delete failed: {e}")))?;
+
+            if rows == 0 {
+                return Err(Error::SecretNotFound(name_clone));
+            }
+            Ok(())
+        })
+        .await
+        .map_err(|e| Error::Storage(format!("spawn_blocking failed: {e}")))?
+    }
+
+    async fn list(&self) -> Result<Vec<SecretMetadata>> {
+        let conn = Arc::clone(&self.conn);
+
+        tokio::task::spawn_blocking(move || {
+            let conn = conn.blocking_lock();
+            let mut stmt = conn
+                .prepare("SELECT id, name, kind, description, created_at, updated_at, version FROM secrets")
+                .map_err(|e| Error::Storage(format!("prepare failed: {e}")))?;
+
+            let rows = stmt
+                .query_map([], |row| {
+                    row_to_metadata(row).map_err(|e| {
+                        rusqlite::Error::FromSqlConversionFailure(0, rusqlite::types::Type::Text, Box::new(e))
+                    })
+                })
+                .map_err(|e| Error::Storage(format!("list query failed: {e}")))?;
+
+            let mut result = Vec::new();
+            for row in rows {
+                result.push(row.map_err(|e| Error::Storage(format!("row extraction failed: {e}")))?);
+            }
+            Ok(result)
+        })
+        .await
+        .map_err(|e| Error::Storage(format!("spawn_blocking failed: {e}")))?
+    }
+}
+
+fn row_to_stored_secret(row: &rusqlite::Row<'_>) -> Result<StoredSecret> {
+    let id_str: String = row.get("id").map_err(|e| Error::Storage(e.to_string()))?;
+    let id_uuid = Uuid::parse_str(&id_str).map_err(|e| Error::Storage(e.to_string()))?;
+    let name_str: String = row.get("name").map_err(|e| Error::Storage(e.to_string()))?;
+    let algorithm_str: String = row.get("algorithm").map_err(|e| Error::Storage(e.to_string()))?;
+    let kind_str: String = row.get("kind").map_err(|e| Error::Storage(e.to_string()))?;
+    let description: Option<String> = row.get("description").map_err(|e| Error::Storage(e.to_string()))?;
+    let created_at_str: String = row.get("created_at").map_err(|e| Error::Storage(e.to_string()))?;
+    let updated_at_str: String = row.get("updated_at").map_err(|e| Error::Storage(e.to_string()))?;
+    let version: i32 = row.get("version").map_err(|e| Error::Storage(e.to_string()))?;
+    let ciphertext: Vec<u8> = row.get("ciphertext").map_err(|e| Error::Storage(e.to_string()))?;
+    let nonce: Vec<u8> = row.get("nonce").map_err(|e| Error::Storage(e.to_string()))?;
+
+    Ok(StoredSecret {
+        id: SecretId::from_uuid(id_uuid),
+        name: SecretName::new(name_str),
+        ciphertext,
+        nonce,
+        algorithm: CipherAlgorithm::from_db_string(&algorithm_str)?,
+        kind: SecretKind::from_db_string(&kind_str)?,
+        description,
+        created_at: chrono::DateTime::parse_from_rfc3339(&created_at_str)
+            .map_err(|e| Error::Storage(format!("invalid created_at: {e}")))?
+            .with_timezone(&Utc),
+        updated_at: chrono::DateTime::parse_from_rfc3339(&updated_at_str)
+            .map_err(|e| Error::Storage(format!("invalid updated_at: {e}")))?
+            .with_timezone(&Utc),
+        version: version as u32,
+    })
+}
+
+fn row_to_metadata(row: &rusqlite::Row<'_>) -> Result<SecretMetadata> {
+    let id_str: String = row.get("id").map_err(|e| Error::Storage(e.to_string()))?;
+    let id_uuid = Uuid::parse_str(&id_str).map_err(|e| Error::Storage(e.to_string()))?;
+    let name_str: String = row.get("name").map_err(|e| Error::Storage(e.to_string()))?;
+    let kind_str: String = row.get("kind").map_err(|e| Error::Storage(e.to_string()))?;
+    let description: Option<String> = row.get("description").map_err(|e| Error::Storage(e.to_string()))?;
+    let created_at_str: String = row.get("created_at").map_err(|e| Error::Storage(e.to_string()))?;
+    let updated_at_str: String = row.get("updated_at").map_err(|e| Error::Storage(e.to_string()))?;
+    let version: i32 = row.get("version").map_err(|e| Error::Storage(e.to_string()))?;
+
+    Ok(SecretMetadata {
+        id: SecretId::from_uuid(id_uuid),
+        name: SecretName::new(name_str),
+        kind: SecretKind::from_db_string(&kind_str)?,
+        description,
+        created_at: chrono::DateTime::parse_from_rfc3339(&created_at_str)
+            .map_err(|e| Error::Storage(format!("invalid created_at: {e}")))?
+            .with_timezone(&Utc),
+        updated_at: chrono::DateTime::parse_from_rfc3339(&updated_at_str)
+            .map_err(|e| Error::Storage(format!("invalid updated_at: {e}")))?
+ .with_timezone(&Utc), + version: version as u32, + }) +} + +#[cfg(test)] +mod tests { + use tempfile::NamedTempFile; + use zerolease::store::CipherAlgorithm; + + use super::*; + + async fn test_store() -> (RusqliteStore, NamedTempFile) { + let tmp = NamedTempFile::new().expect("should create temp file"); + let store = RusqliteStore::new(tmp.path()).await.expect("should create store"); + (store, tmp) + } + + fn test_params(name: &str) -> StoreSecretParams { + StoreSecretParams { + name: SecretName::new(name), + ciphertext: vec![1, 2, 3, 4], + nonce: vec![5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + algorithm: CipherAlgorithm::Aes256Gcm, + kind: SecretKind::Pat, + description: Some("test secret".into()), + } + } + + #[tokio::test] + async fn put_and_get_round_trip() { + let (store, _tmp) = test_store().await; + let params = test_params("my-secret"); + let stored = store.put(params).await.expect("should put secret"); + assert_eq!(stored.name, SecretName::new("my-secret")); + assert_eq!(stored.ciphertext, vec![1, 2, 3, 4]); + assert_eq!(stored.version, 1); + let fetched = store + .get(&SecretName::new("my-secret")) + .await + .expect("should get secret"); + assert_eq!(fetched.name, stored.name); + assert_eq!(fetched.ciphertext, stored.ciphertext); + } + + #[tokio::test] + async fn put_duplicate_name_errors() { + let (store, _tmp) = test_store().await; + store.put(test_params("dup")).await.expect("first put"); + let err = store + .put(test_params("dup")) + .await + .expect_err("duplicate put") + .to_string(); + assert!(err.contains("already exists"), "error was: {err}"); + } + + #[tokio::test] + async fn get_missing_errors() { + let (store, _tmp) = test_store().await; + let err = store + .get(&SecretName::new("nope")) + .await + .expect_err("missing get") + .to_string(); + assert!(err.contains("not found"), "error was: {err}"); + } + + #[tokio::test] + async fn update_increments_version() { + let (store, _tmp) = test_store().await; + 
store.put(test_params("versioned")).await.expect("put"); + let updated = store + .update( + &SecretName::new("versioned"), + vec![10, 20], + vec![1; 12], + CipherAlgorithm::Aes256Gcm, + ) + .await + .expect("update"); + assert_eq!(updated.version, 2); + assert_eq!(updated.ciphertext, vec![10, 20]); + } + + #[tokio::test] + async fn delete_removes_secret() { + let (store, _tmp) = test_store().await; + store.put(test_params("doomed")).await.expect("put"); + store.delete(&SecretName::new("doomed")).await.expect("delete"); + assert!(store.get(&SecretName::new("doomed")).await.is_err()); + } + + #[tokio::test] + async fn list_returns_metadata() { + let (store, _tmp) = test_store().await; + store.put(test_params("first")).await.expect("put first"); + store.put(test_params("second")).await.expect("put second"); + let list = store.list().await.expect("list"); + assert_eq!(list.len(), 2); + } +} diff --git a/examples/basic_vault.rs b/examples/basic_vault.rs index f7736f3..f900204 100644 --- a/examples/basic_vault.rs +++ b/examples/basic_vault.rs @@ -12,11 +12,11 @@ use zerolease::audit::AuditLog; use zerolease::keysource::env::EnvVarSource; use zerolease::lease::LeaseTerms; use zerolease::policy::{AgentPattern, PolicyConfig, PolicyEngine, PolicyGrant, SecretPattern}; -use zerolease::store::sqlite::SqliteStore; use zerolease::store::{CipherAlgorithm, SecretKind}; use zerolease::transport::PeerIdentity; use zerolease::types::{AgentId, DomainScope, SecretName}; use zerolease::vault::Vault; +use zerolease_store_rusqlite::RusqliteStore; /// A no-op audit log for this example. In production, use SqliteAuditLog /// or configure a tracing subscriber to capture audit events. @@ -58,7 +58,7 @@ async fn main() -> Result<(), Box> { let key_source = EnvVarSource::new("ZEROLEASE_KEY"); // SQLite for secret storage (single-file, zero-config). 
-    let store = SqliteStore::new("example-secrets.db").await?;
+    let store = RusqliteStore::new("example-secrets.db").await?;
 
     // Policy: allow "my-agent" to access "github-pat" for github.com
     let policy = PolicyEngine::new(PolicyConfig {
diff --git a/sql/postgres_audit_table.sql b/sql/postgres_audit_table.sql
new file mode 100644
index 0000000..0c22415
--- /dev/null
+++ b/sql/postgres_audit_table.sql
@@ -0,0 +1,18 @@
+-- zerolease audit log table (PostgreSQL)
+-- Schema version: 1
+
+CREATE TABLE IF NOT EXISTS audit_events (
+    event_id TEXT PRIMARY KEY,
+    timestamp TIMESTAMPTZ NOT NULL,
+    event TEXT NOT NULL,
+    agent TEXT NOT NULL,
+    peer_identity TEXT NOT NULL,
+    outcome TEXT NOT NULL,
+    secret_name TEXT,
+    lease_id TEXT
+);
+
+CREATE INDEX IF NOT EXISTS idx_audit_agent ON audit_events(agent);
+CREATE INDEX IF NOT EXISTS idx_audit_secret ON audit_events(secret_name);
+CREATE INDEX IF NOT EXISTS idx_audit_lease ON audit_events(lease_id);
+CREATE INDEX IF NOT EXISTS idx_audit_timestamp ON audit_events(timestamp);
diff --git a/src/audit/mod.rs b/src/audit/mod.rs
index a117efa..748a89c 100644
--- a/src/audit/mod.rs
+++ b/src/audit/mod.rs
@@ -29,8 +29,8 @@ use crate::error::Result;
 use crate::transport::PeerIdentity;
 use crate::types::{AgentId, DomainScope, LeaseId, SecretName};
 
-#[cfg(feature = "sqlite")]
-pub mod sqlite;
+// Audit log implementations live in separate crates alongside their
+// corresponding store backends.
 
 /// A single audit log entry.
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -118,6 +118,33 @@ pub enum AuditEvent {
     PolicyReloaded { grant_count: usize },
 }
 
+impl AuditEvent {
+    /// Extract denormalized `secret_name` and `lease_id` for database indexing.
+    pub fn indexed_fields(&self) -> (Option<String>, Option<String>) {
+        match self {
+            Self::LeaseGranted {
+                secret_name, lease_id, ..
+            } => (
+                Some(secret_name.as_str().to_string()),
+                Some(lease_id.as_uuid().to_string()),
+            ),
+            Self::SecretAccessed {
+                secret_name, lease_id, ..
+ } => ( + Some(secret_name.as_str().to_string()), + Some(lease_id.as_uuid().to_string()), + ), + Self::LeaseRevoked { lease_id, .. } => (None, Some(lease_id.as_uuid().to_string())), + Self::LeaseRenewed { lease_id, .. } => (None, Some(lease_id.as_uuid().to_string())), + Self::AccessDenied { secret_name, .. } => (Some(secret_name.as_str().to_string()), None), + Self::SecretStored { secret_name } => (Some(secret_name.as_str().to_string()), None), + Self::SecretRotated { secret_name, .. } => (Some(secret_name.as_str().to_string()), None), + Self::SecretDeleted { secret_name } => (Some(secret_name.as_str().to_string()), None), + Self::DekRotated | Self::PolicyReloaded { .. } => (None, None), + } + } +} + /// Why a lease was revoked. #[derive(Debug, Clone, Serialize, Deserialize)] pub enum RevocationReason { diff --git a/src/bin/zerolease.rs b/src/bin/zerolease.rs deleted file mode 100644 index d6cd0e5..0000000 --- a/src/bin/zerolease.rs +++ /dev/null @@ -1,110 +0,0 @@ -//! zerolease standalone vault server. -//! -//! Starts a vault server on a Unix domain socket with SQLite storage -//! and environment variable key management. Handles ctrl-c for -//! graceful shutdown. -//! -//! Usage: -//! ZEROLEASE_KEY=$(openssl rand -hex 32) cargo run -- \ -//! --socket /tmp/zerolease.sock \ -//! --db /tmp/zerolease-secrets.db \ -//! --audit-db /tmp/zerolease-audit.db -//! -//! Connect with a client or test with: -//! echo '{"protocol":"zerolease","version":1}' | socat - -//! 
UNIX-CONNECT:/tmp/zerolease.sock - -use std::path::PathBuf; -use std::sync::Arc; - -use zerolease::audit::sqlite::SqliteAuditLog; -use zerolease::auth::AllowAllAdmin; -use zerolease::keysource::env::EnvVarSource; -use zerolease::server::VaultServer; -use zerolease::store::CipherAlgorithm; -use zerolease::store::sqlite::SqliteStore; -use zerolease::transport::uds::UdsListener; -use zerolease::vault::Vault; - -fn main() -> Result<(), Box<dyn std::error::Error>> { - // Parse args (minimal — no clap dependency) - let args: Vec<String> = std::env::args().collect(); - let socket_path = get_arg(&args, "--socket").unwrap_or_else(|| "/tmp/zerolease.sock".into()); - let db_path = get_arg(&args, "--db").unwrap_or_else(|| "/tmp/zerolease-secrets.db".into()); - let audit_path = get_arg(&args, "--audit-db").unwrap_or_else(|| "/tmp/zerolease-audit.db".into()); - let key_var = get_arg(&args, "--key-var").unwrap_or_else(|| "ZEROLEASE_KEY".into()); - - // Initialize tracing (structured JSON to stderr) - tracing_subscriber::fmt() - .with_env_filter( - tracing_subscriber::EnvFilter::try_from_default_env() - .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")), - ) - .json() - .init(); - - // Run the async server - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build()?
- .block_on(run(socket_path, db_path, audit_path, key_var)) -} - -async fn run( - socket_path: String, - db_path: String, - audit_path: String, - key_var: String, -) -> Result<(), Box<dyn std::error::Error>> { - tracing::info!( - socket = %socket_path, - db = %db_path, - audit = %audit_path, - key_var = %key_var, - "starting zerolease vault" - ); - - // Set up components - let key_source = EnvVarSource::new(&key_var); - let store = SqliteStore::new(&db_path).await?; - let audit = SqliteAuditLog::new(&audit_path).await?; - - let policy = zerolease::policy::PolicyEngine::new(zerolease::policy::PolicyConfig { - default_lease_terms: zerolease::lease::LeaseTerms::default_short(), - grants: vec![], - }); - - let vault = Arc::new(Vault::new(key_source, store, audit, policy, CipherAlgorithm::Aes256Gcm)); - - vault.initialize().await?; - tracing::info!("vault initialized, DEK loaded"); - - // Remove stale socket file if it exists - let sock = PathBuf::from(&socket_path); - if sock.exists() { - std::fs::remove_file(&sock)?; - tracing::info!("removed stale socket file"); - } - - let listener = UdsListener::bind(&sock)?; - tracing::info!(socket = %socket_path, "listening for connections"); - - let server = VaultServer::new(vault, listener, Arc::new(AllowAllAdmin)); - - // Run until ctrl-c - server - .serve_with_shutdown(async { - tokio::signal::ctrl_c().await.expect("failed to listen for ctrl-c"); - }) - .await?; - - // Clean up socket - std::fs::remove_file(&sock).ok(); - tracing::info!("server stopped"); - - Ok(()) -} - -fn get_arg(args: &[String], flag: &str) -> Option<String> { - args.windows(2).find(|w| w[0] == flag).map(|w| w[1].clone()) -} diff --git a/src/client.rs b/src/client.rs index d3408c9..424f8a3 100644 --- a/src/client.rs +++ b/src/client.rs @@ -252,7 +252,7 @@ mod tests { use crate::lease::LeaseTerms; use crate::policy::{AgentPattern, PolicyConfig, PolicyEngine, PolicyGrant, SecretPattern}; use crate::server::VaultServer; - use crate::store::sqlite::SqliteStore; + use
zerolease_store_rusqlite::RusqliteStore; use crate::store::{CipherAlgorithm, SecretKind}; use crate::transport::uds::{UdsConnector, UdsListener}; use crate::types::{AgentId, DomainScope, LeaseId, SecretName}; @@ -292,7 +292,7 @@ mod tests { let db_path = dir.path().join("secrets.db"); let key_source = EnvVarSource::new(env_var); - let store = SqliteStore::new(&db_path) + let store = RusqliteStore::new(&db_path) .await .expect("failed to initialize SQLite store"); let audit = NoopAuditLog; diff --git a/src/keysource/mod.rs b/src/keysource/mod.rs index 5c6aa65..b54ee60 100644 --- a/src/keysource/mod.rs +++ b/src/keysource/mod.rs @@ -164,7 +164,7 @@ impl KeySourceConfig { encrypted_dek_path, } => { let source = kms::KmsSource::new(key_id, region, encrypted_dek_path).await?; - Ok(Box::new(source) as Box) + Ok(Box::new(source)) } #[cfg(not(feature = "kms"))] diff --git a/src/server.rs b/src/server.rs index 4a556ca..fe95bdd 100644 --- a/src/server.rs +++ b/src/server.rs @@ -465,7 +465,7 @@ mod tests { use crate::policy::{AgentPattern, PolicyConfig, PolicyEngine, PolicyGrant, SecretPattern}; use crate::protocol::{Request, methods}; use crate::store::CipherAlgorithm; - use crate::store::sqlite::SqliteStore; + use zerolease_store_rusqlite::RusqliteStore; use crate::transport::PeerIdentity; use crate::types::{AgentId, DomainScope, LeaseId, SecretName}; use crate::vault::Vault; @@ -499,7 +499,7 @@ mod tests { async fn test_vault( env_var: &str, grants: Vec, - ) -> (Arc>, NamedTempFile) { + ) -> (Arc>, NamedTempFile) { // SAFETY: tests run single-threaded via --test-threads=1 #[allow(unsafe_code)] unsafe { @@ -508,7 +508,7 @@ mod tests { let key_source = EnvVarSource::new(env_var); let tmp = NamedTempFile::new().expect("should create temp file"); - let store = SqliteStore::new(tmp.path()).await.expect("should create store"); + let store = RusqliteStore::new(tmp.path()).await.expect("should create store"); let audit = NoopAuditLog; let policy = PolicyEngine::new(PolicyConfig { 
diff --git a/src/store/mod.rs b/src/store/mod.rs index 57c1540..56a6cfa 100644 --- a/src/store/mod.rs +++ b/src/store/mod.rs @@ -19,11 +19,9 @@ use serde::{Deserialize, Serialize}; use crate::error::Result; use crate::types::{SecretId, SecretName}; -#[cfg(feature = "postgres")] -pub mod postgres; - -#[cfg(feature = "sqlite")] -pub mod sqlite; +// Storage backend implementations live in separate crates: +// - zerolease-store-rusqlite (for apps using rusqlite, e.g. zeroclaw) +// - zerolease-store-sqlx (for standalone deployments) [planned] /// An encrypted secret as stored in the backend. /// @@ -70,6 +68,18 @@ pub enum CipherAlgorithm { XChaCha20Poly1305, } +impl CipherAlgorithm { + /// Serialize to a JSON string for database storage. + pub fn to_db_string(&self) -> String { + serde_json::to_string(self).expect("CipherAlgorithm serialization is infallible") + } + + /// Deserialize from a JSON string read from the database. + pub fn from_db_string(s: &str) -> Result<Self> { + serde_json::from_str(s).map_err(|e| crate::error::Error::Storage(format!("invalid algorithm value: {e}"))) + } +} + /// What kind of credential this is. Informs how it should be injected /// into requests (e.g., as a Bearer token header vs. basic auth). #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum SecretKind { @@ -95,6 +105,18 @@ pub enum SecretKind { Opaque, } +impl SecretKind { + /// Serialize to a JSON string for database storage. + pub fn to_db_string(&self) -> String { + serde_json::to_string(self).expect("SecretKind serialization is infallible") + } + + /// Deserialize from a JSON string read from the database. + pub fn from_db_string(s: &str) -> Result<Self> { + serde_json::from_str(s).map_err(|e| crate::error::Error::Storage(format!("invalid kind value: {e}"))) + } +} + /// Parameters for storing a new secret. The vault encrypts the plaintext /// value before passing it to the store.
#[derive(Debug)] diff --git a/src/vault.rs b/src/vault.rs index 44e1ba7..fb83ae0 100644 --- a/src/vault.rs +++ b/src/vault.rs @@ -575,20 +575,17 @@ where #[cfg(test)] mod tests { - #[cfg(feature = "sqlite")] - use tempfile::NamedTempFile; + #[cfg(feature = "sqlite")] use super::*; - use crate::audit::*; - use crate::keysource::env::EnvVarSource; - use crate::policy::{AgentPattern, PolicyConfig, PolicyGrant, SecretPattern}; - use crate::store::SecretKind; #[cfg(feature = "sqlite")] - use crate::store::sqlite::SqliteStore; + use crate::audit::*; /// A no-op audit log that discards all events. For testing only. + #[cfg(feature = "sqlite")] struct NoopAuditLog; + #[cfg(feature = "sqlite")] #[async_trait::async_trait] impl AuditLog for NoopAuditLog { async fn record(&self, _entry: AuditEntry) -> Result<()> { @@ -622,7 +619,7 @@ mod tests { // Create components let key_source = EnvVarSource::new(key_var); let tmp = NamedTempFile::new().expect("failed to create temp file for test DB"); - let store = SqliteStore::new(tmp.path()) + let store = zerolease_store_rusqlite::RusqliteStore::new(tmp.path()) .await .expect("failed to initialize SQLite store"); let audit = NoopAuditLog;