diff --git a/.sqlx/query-32d118e607db4364979c52831e0c30a215779928a041ef51e93383e93288aac2.json b/.sqlx/query-32d118e607db4364979c52831e0c30a215779928a041ef51e93383e93288aac2.json index e23eb43f..217c8d3c 100644 --- a/.sqlx/query-32d118e607db4364979c52831e0c30a215779928a041ef51e93383e93288aac2.json +++ b/.sqlx/query-32d118e607db4364979c52831e0c30a215779928a041ef51e93383e93288aac2.json @@ -47,6 +47,11 @@ "ordinal": 8, "name": "updated_at", "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "name", + "type_info": "Varchar" } ], "parameters": { @@ -63,6 +68,7 @@ true, true, false, + false, false ] }, diff --git a/.sqlx/query-8ec4c1e77a941efe4c1c36e26c5e1dfcb0e7769f0333d2acf7d6e0fb97ca12dc.json b/.sqlx/query-4048935127dfdfa4f8d1c7ec9137149b736702a008e920373c139d5cc8f228a5.json similarity index 51% rename from .sqlx/query-8ec4c1e77a941efe4c1c36e26c5e1dfcb0e7769f0333d2acf7d6e0fb97ca12dc.json rename to .sqlx/query-4048935127dfdfa4f8d1c7ec9137149b736702a008e920373c139d5cc8f228a5.json index 06797523..3cfffed6 100644 --- a/.sqlx/query-8ec4c1e77a941efe4c1c36e26c5e1dfcb0e7769f0333d2acf7d6e0fb97ca12dc.json +++ b/.sqlx/query-4048935127dfdfa4f8d1c7ec9137149b736702a008e920373c139d5cc8f228a5.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO cloud (\n user_id,\n provider,\n cloud_token,\n cloud_key,\n cloud_secret,\n save_token,\n created_at,\n updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, NOW() at time zone 'utc', NOW() at time zone 'utc')\n RETURNING id;\n ", + "query": "\n INSERT INTO cloud (\n user_id,\n name,\n provider,\n cloud_token,\n cloud_key,\n cloud_secret,\n save_token,\n created_at,\n updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, NOW() at time zone 'utc', NOW() at time zone 'utc')\n RETURNING id;\n ", "describe": { "columns": [ { @@ -16,6 +16,7 @@ "Varchar", "Varchar", "Varchar", + "Varchar", "Bool" ] }, @@ -23,5 +24,5 @@ false ] }, - "hash": "8ec4c1e77a941efe4c1c36e26c5e1dfcb0e7769f0333d2acf7d6e0fb97ca12dc" + "hash": 
"4048935127dfdfa4f8d1c7ec9137149b736702a008e920373c139d5cc8f228a5" } diff --git a/.sqlx/query-b8296183bd28695d3a7574e57db445dc1f4b2d659a3805f92f6f5f83b562266b.json b/.sqlx/query-b8296183bd28695d3a7574e57db445dc1f4b2d659a3805f92f6f5f83b562266b.json index a924adf9..4b63dd09 100644 --- a/.sqlx/query-b8296183bd28695d3a7574e57db445dc1f4b2d659a3805f92f6f5f83b562266b.json +++ b/.sqlx/query-b8296183bd28695d3a7574e57db445dc1f4b2d659a3805f92f6f5f83b562266b.json @@ -47,6 +47,11 @@ "ordinal": 8, "name": "updated_at", "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "name", + "type_info": "Varchar" } ], "parameters": { @@ -63,6 +68,7 @@ true, true, false, + false, false ] }, diff --git a/.sqlx/query-7b6c7e798237d0c08b7c1126d7044df13c46ef2eb373398a535090edf738cb5a.json b/.sqlx/query-e0bc560df5637788c7096c0bf0535cc601af9ca4a06bd87100cd68a251431618.json similarity index 73% rename from .sqlx/query-7b6c7e798237d0c08b7c1126d7044df13c46ef2eb373398a535090edf738cb5a.json rename to .sqlx/query-e0bc560df5637788c7096c0bf0535cc601af9ca4a06bd87100cd68a251431618.json index ed0cd48d..1fe1ad13 100644 --- a/.sqlx/query-7b6c7e798237d0c08b7c1126d7044df13c46ef2eb373398a535090edf738cb5a.json +++ b/.sqlx/query-e0bc560df5637788c7096c0bf0535cc601af9ca4a06bd87100cd68a251431618.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE cloud\n SET\n user_id=$2,\n provider=$3,\n cloud_token=$4,\n cloud_key=$5,\n cloud_secret=$6,\n save_token=$7,\n updated_at=NOW() at time zone 'utc'\n WHERE id = $1\n RETURNING *\n ", + "query": "\n UPDATE cloud\n SET\n user_id=$2,\n name=$3,\n provider=$4,\n cloud_token=$5,\n cloud_key=$6,\n cloud_secret=$7,\n save_token=$8,\n updated_at=NOW() at time zone 'utc'\n WHERE id = $1\n RETURNING *\n ", "describe": { "columns": [ { @@ -47,6 +47,11 @@ "ordinal": 8, "name": "updated_at", "type_info": "Timestamptz" + }, + { + "ordinal": 9, + "name": "name", + "type_info": "Varchar" } ], "parameters": { @@ -57,6 +62,7 @@ "Varchar", "Varchar", "Varchar", + 
"Varchar", "Bool" ] }, @@ -69,8 +75,9 @@ true, true, false, + false, false ] }, - "hash": "7b6c7e798237d0c08b7c1126d7044df13c46ef2eb373398a535090edf738cb5a" + "hash": "e0bc560df5637788c7096c0bf0535cc601af9ca4a06bd87100cd68a251431618" } diff --git a/migrations/20260306120000_add_cloud_name.down.sql b/migrations/20260306120000_add_cloud_name.down.sql new file mode 100644 index 00000000..ae04cabe --- /dev/null +++ b/migrations/20260306120000_add_cloud_name.down.sql @@ -0,0 +1,2 @@ +DROP INDEX IF EXISTS idx_cloud_user_name; +ALTER TABLE cloud DROP COLUMN IF EXISTS name; diff --git a/migrations/20260306120000_add_cloud_name.up.sql b/migrations/20260306120000_add_cloud_name.up.sql new file mode 100644 index 00000000..7af2a1cf --- /dev/null +++ b/migrations/20260306120000_add_cloud_name.up.sql @@ -0,0 +1,12 @@ +-- Add a human-friendly name to cloud credentials so users can reference them +-- by name (e.g. `stacker deploy --key my-hetzner`) instead of by provider. +ALTER TABLE cloud ADD COLUMN name VARCHAR(100); + +-- Backfill existing rows: default name = "{provider}-{id}" (e.g. 
"htz-4") +UPDATE cloud SET name = provider || '-' || id WHERE name IS NULL; + +-- Make name NOT NULL after backfill +ALTER TABLE cloud ALTER COLUMN name SET NOT NULL; + +-- Unique per user: a user can't have two cloud keys with the same name +CREATE UNIQUE INDEX idx_cloud_user_name ON cloud (user_id, name); diff --git a/migrations/20260306190000_casbin_client_role_mapping.down.sql b/migrations/20260306190000_casbin_client_role_mapping.down.sql new file mode 100644 index 00000000..8f134f20 --- /dev/null +++ b/migrations/20260306190000_casbin_client_role_mapping.down.sql @@ -0,0 +1,9 @@ +-- Revert client role Casbin mappings +DELETE FROM public.casbin_rule WHERE ptype = 'g' AND v0 = 'client' AND v1 = 'group_anonymous'; +DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v0 = 'client' AND v1 = '/api/v1/agent/register' AND v2 = 'POST'; +DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v0 = 'client' AND v1 = '/api/v1/agent/commands/wait/:deployment_hash' AND v2 = 'GET'; +DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v0 = 'client' AND v1 = '/api/v1/agent/commands/report' AND v2 = 'POST'; +DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v0 = 'client' AND v1 = '/project/:id/deploy' AND v2 = 'POST'; +DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v0 = 'client' AND v1 = '/project/:id/deploy/:cloud_id' AND v2 = 'POST'; +DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v0 = 'client' AND v1 = '/project/:id/compose' AND v2 = 'GET'; +DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v0 = 'client' AND v1 = '/project/:id/compose' AND v2 = 'POST'; diff --git a/migrations/20260306190000_casbin_client_role_mapping.up.sql b/migrations/20260306190000_casbin_client_role_mapping.up.sql new file mode 100644 index 00000000..658e8719 --- /dev/null +++ b/migrations/20260306190000_casbin_client_role_mapping.up.sql @@ -0,0 +1,44 @@ +-- Fix 403 on agent registration when using HMAC auth (client role). 
+-- The HMAC middleware now sets subject = "client" (previously was the numeric +-- client_id which had no Casbin mapping at all). +-- Ensure the "client" role inherits from group_anonymous (like group_user/group_admin). + +INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5) +VALUES ('g', 'client', 'group_anonymous', '', '', '', '') +ON CONFLICT ON CONSTRAINT unique_key_sqlx_adapter DO NOTHING; + +-- Safety: ensure agent register is accessible by group_anonymous +INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5) +VALUES ('p', 'group_anonymous', '/api/v1/agent/register', 'POST', '', '', '') +ON CONFLICT ON CONSTRAINT unique_key_sqlx_adapter DO NOTHING; + +-- Safety: ensure client has explicit access to agent register +INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5) +VALUES ('p', 'client', '/api/v1/agent/register', 'POST', '', '', '') +ON CONFLICT ON CONSTRAINT unique_key_sqlx_adapter DO NOTHING; + +-- Grant client access to other agent endpoints (wait, report, enqueue) +INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5) +VALUES ('p', 'client', '/api/v1/agent/commands/wait/:deployment_hash', 'GET', '', '', '') +ON CONFLICT ON CONSTRAINT unique_key_sqlx_adapter DO NOTHING; + +INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5) +VALUES ('p', 'client', '/api/v1/agent/commands/report', 'POST', '', '', '') +ON CONFLICT ON CONSTRAINT unique_key_sqlx_adapter DO NOTHING; + +-- Grant client access to deploy-related endpoints that HMAC clients need +INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5) +VALUES ('p', 'client', '/project/:id/deploy', 'POST', '', '', '') +ON CONFLICT ON CONSTRAINT unique_key_sqlx_adapter DO NOTHING; + +INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5) +VALUES ('p', 'client', '/project/:id/deploy/:cloud_id', 'POST', '', '', '') +ON CONFLICT ON CONSTRAINT unique_key_sqlx_adapter DO NOTHING; + +INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5) +VALUES 
('p', 'client', '/project/:id/compose', 'GET', '', '', '') +ON CONFLICT ON CONSTRAINT unique_key_sqlx_adapter DO NOTHING; + +INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5) +VALUES ('p', 'client', '/project/:id/compose', 'POST', '', '', '') +ON CONFLICT ON CONSTRAINT unique_key_sqlx_adapter DO NOTHING; diff --git a/src/bin/stacker.rs b/src/bin/stacker.rs index a85d4fdd..0c29abaa 100644 --- a/src/bin/stacker.rs +++ b/src/bin/stacker.rs @@ -91,6 +91,9 @@ enum StackerCommands { /// Name of saved cloud credential to reuse (overrides deploy.cloud.key in stacker.yml) #[arg(long, value_name = "KEY_NAME")] key: Option, + /// ID of saved cloud credential to reuse (from `stacker list clouds`) + #[arg(long, value_name = "CLOUD_ID")] + key_id: Option, /// Name of saved server to reuse (overrides deploy.cloud.server in stacker.yml) #[arg(long, value_name = "SERVER_NAME")] server: Option, @@ -100,6 +103,12 @@ enum StackerCommands { /// Disable automatic progress watching after deploy #[arg(long)] no_watch: bool, + /// Persist server details into stacker.yml after deploy (for redeploy) + #[arg(long)] + lock: bool, + /// Skip server pre-check; force fresh cloud provision even if deploy.server exists + #[arg(long)] + force_new: bool, }, /// Show container logs Logs { @@ -207,6 +216,12 @@ enum ListCommands { #[arg(long)] json: bool, }, + /// List saved cloud credentials + Clouds { + /// Output in JSON format + #[arg(long)] + json: bool, + }, } #[derive(Debug, Subcommand)] @@ -306,6 +321,16 @@ enum ConfigCommands { #[arg(long, default_value_t = true)] interactive: bool, }, + /// Persist deployment lock into stacker.yml (writes deploy.server from last deploy) + Lock { + #[arg(long, value_name = "FILE")] + file: Option, + }, + /// Remove deploy.server section from stacker.yml (allows fresh cloud provision) + Unlock { + #[arg(long, value_name = "FILE")] + file: Option, + }, /// Guided setup helpers Setup { #[command(subcommand)] @@ -500,9 +525,12 @@ fn get_command( 
force_rebuild, project, key, + key_id, server, watch, no_watch, + lock, + force_new, } => Box::new( stacker::console::commands::cli::deploy::DeployCommand::new( target, @@ -511,7 +539,10 @@ fn get_command( force_rebuild, ) .with_remote_overrides(project, key, server) - .with_watch(watch, no_watch), + .with_key_id(key_id) + .with_watch(watch, no_watch) + .with_lock(lock) + .with_force_new(force_new), ), StackerCommands::Logs { service, @@ -540,6 +571,12 @@ fn get_command( ConfigCommands::Fix { file, interactive } => Box::new( stacker::console::commands::cli::config::ConfigFixCommand::new(file, interactive), ), + ConfigCommands::Lock { file } => Box::new( + stacker::console::commands::cli::config::ConfigLockCommand::new(file), + ), + ConfigCommands::Unlock { file } => Box::new( + stacker::console::commands::cli::config::ConfigUnlockCommand::new(file), + ), ConfigCommands::Setup { command } => match command { ConfigSetupCommands::Cloud { file } => Box::new( stacker::console::commands::cli::config::ConfigSetupCloudCommand::new(file), @@ -590,6 +627,9 @@ fn get_command( ListCommands::SshKeys { json } => Box::new( stacker::console::commands::cli::list::ListSshKeysCommand::new(json), ), + ListCommands::Clouds { json } => Box::new( + stacker::console::commands::cli::list::ListCloudsCommand::new(json), + ), }, StackerCommands::SshKey { command: ssh_cmd } => match ssh_cmd { SshKeyCommands::Generate { server_id, save_to } => Box::new( diff --git a/src/cli/deployment_lock.rs b/src/cli/deployment_lock.rs new file mode 100644 index 00000000..872bb98a --- /dev/null +++ b/src/cli/deployment_lock.rs @@ -0,0 +1,366 @@ +use std::path::{Path, PathBuf}; + +use chrono::Utc; +use serde::{Deserialize, Serialize}; + +use crate::cli::config_parser::{DeployTarget, ServerConfig, StackerConfig}; +use crate::cli::error::CliError; +use crate::cli::install_runner::DeployResult; + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// DeploymentLock — persisted deployment context +// 
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +/// Filename for the deployment lockfile inside `.stacker/`. +pub const LOCKFILE_NAME: &str = "deployment.lock"; + +/// Persisted deployment context written after a successful deploy. +/// +/// Lives in `.stacker/deployment.lock` and allows subsequent deploys +/// to reuse the same server without requiring manual stacker.yml edits. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DeploymentLock { + /// Deploy target that was used (local / cloud / server). + pub target: String, + + /// IP address of the provisioned/used server. + pub server_ip: Option<String>, + + /// SSH user on the target server. + pub ssh_user: Option<String>, + + /// SSH port on the target server. + pub ssh_port: Option<u16>, + + /// Server name on the Stacker platform (for `--server` reuse). + pub server_name: Option<String>, + + /// Stacker server deployment ID. + pub deployment_id: Option<i32>, + + /// Stacker server project ID. + pub project_id: Option<i32>, + + /// Cloud credential ID used for this deployment. + pub cloud_id: Option<i32>, + + /// Project name as known by the Stacker server. + pub project_name: Option<String>, + + /// ISO 8601 timestamp of the deployment. + pub deployed_at: String, +} + +impl DeploymentLock { + // ── Constructors ───────────────────────────────── + + /// Build a lock from a `DeployResult` (basic info available immediately after deploy). + pub fn from_result(result: &DeployResult) -> Self { + Self { + target: format!("{:?}", result.target).to_lowercase(), + server_ip: result.server_ip.clone(), + ssh_user: None, + ssh_port: None, + server_name: None, + deployment_id: result.deployment_id, + project_id: result.project_id, + cloud_id: None, + project_name: None, + deployed_at: Utc::now().to_rfc3339(), + } + } + + /// Build a lock for a local deploy.
+ pub fn for_local() -> Self { + Self { + target: "local".to_string(), + server_ip: Some("127.0.0.1".to_string()), + ssh_user: None, + ssh_port: None, + server_name: None, + deployment_id: None, + project_id: None, + cloud_id: None, + project_name: None, + deployed_at: Utc::now().to_rfc3339(), + } + } + + /// Build a lock for a server (SSH) deploy from the config. + pub fn for_server(server_cfg: &ServerConfig) -> Self { + Self { + target: "server".to_string(), + server_ip: Some(server_cfg.host.clone()), + ssh_user: Some(server_cfg.user.clone()), + ssh_port: Some(server_cfg.port), + server_name: None, + deployment_id: None, + project_id: None, + cloud_id: None, + project_name: None, + deployed_at: Utc::now().to_rfc3339(), + } + } + + // ── Enrichment (builder pattern) ───────────────── + + /// Enrich with server details fetched from the Stacker API. + pub fn with_server_info( + mut self, + ip: Option, + user: Option, + port: Option, + name: Option, + cloud_id: Option, + ) -> Self { + if ip.is_some() { + self.server_ip = ip; + } + if user.is_some() { + self.ssh_user = user; + } + if port.is_some() { + self.ssh_port = port; + } + if name.is_some() { + self.server_name = name; + } + if cloud_id.is_some() { + self.cloud_id = cloud_id; + } + self + } + + pub fn with_project_name(mut self, name: Option) -> Self { + if name.is_some() { + self.project_name = name; + } + self + } + + // ── Persistence ────────────────────────────────── + + /// Resolve the lockfile path inside `.stacker/` relative to the project dir. + pub fn lockfile_path(project_dir: &Path) -> PathBuf { + project_dir.join(".stacker").join(LOCKFILE_NAME) + } + + /// Save the lock to `.stacker/deployment.lock`. 
+ pub fn save(&self, project_dir: &Path) -> Result { + let path = Self::lockfile_path(project_dir); + + // Ensure .stacker/ exists + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent).map_err(CliError::Io)?; + } + + let content = serde_yaml::to_string(self).map_err(|e| { + CliError::ConfigValidation(format!("Failed to serialize deployment lock: {}", e)) + })?; + + std::fs::write(&path, &content).map_err(CliError::Io)?; + + Ok(path) + } + + /// Load a deployment lock from `.stacker/deployment.lock`. + /// Returns `None` if the file does not exist. + pub fn load(project_dir: &Path) -> Result, CliError> { + let path = Self::lockfile_path(project_dir); + + if !path.exists() { + return Ok(None); + } + + let content = std::fs::read_to_string(&path).map_err(CliError::Io)?; + + let lock: Self = serde_yaml::from_str(&content).map_err(|e| { + CliError::ConfigValidation(format!( + "Failed to parse deployment lock ({}): {}. Delete the file and redeploy.", + path.display(), + e + )) + })?; + + Ok(Some(lock)) + } + + /// Check whether a lockfile exists for this project. + pub fn exists(project_dir: &Path) -> bool { + Self::lockfile_path(project_dir).exists() + } + + // ── Config update ──────────────────────────────── + + /// Update a StackerConfig's `deploy.server` section from this lock. + /// + /// Used by `--lock` flag and `stacker config lock` to persist + /// server details into stacker.yml for future SSH-based deploys. 
+ pub fn apply_to_config(&self, config: &mut StackerConfig) { + if let Some(ref ip) = self.server_ip { + if ip == "127.0.0.1" { + // Local deploy — nothing to persist in server section + return; + } + + let ssh_key = config + .deploy + .server + .as_ref() + .and_then(|s| s.ssh_key.clone()) + .or_else(|| { + config + .deploy + .cloud + .as_ref() + .and_then(|c| c.ssh_key.clone()) + }); + + config.deploy.server = Some(ServerConfig { + host: ip.clone(), + user: self.ssh_user.clone().unwrap_or_else(|| "root".to_string()), + ssh_key, + port: self.ssh_port.unwrap_or(22), + }); + } + } + + /// Write a StackerConfig back to disk (used after `apply_to_config`). + /// + /// Creates a `.bak` backup before overwriting. + pub fn write_config(config: &StackerConfig, config_path: &Path) -> Result<(), CliError> { + // Backup existing file + if config_path.exists() { + let backup_path = config_path.with_extension("yml.bak"); + std::fs::copy(config_path, &backup_path).map_err(CliError::Io)?; + } + + let yaml = serde_yaml::to_string(config).map_err(|e| { + CliError::ConfigValidation(format!("Failed to serialize config: {}", e)) + })?; + + std::fs::write(config_path, &yaml).map_err(CliError::Io)?; + + Ok(()) + } +} + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// Tests +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + fn sample_lock() -> DeploymentLock { + DeploymentLock { + target: "cloud".to_string(), + server_ip: Some("203.0.113.42".to_string()), + ssh_user: Some("root".to_string()), + ssh_port: Some(22), + server_name: Some("my-server".to_string()), + deployment_id: Some(123), + project_id: Some(456), + cloud_id: Some(7), + project_name: Some("my-project".to_string()), + deployed_at: "2026-03-06T12:00:00+00:00".to_string(), + } + } + + #[test] + fn round_trip_save_load() { + let tmp = TempDir::new().unwrap(); + let lock = sample_lock(); + + let path = lock.save(tmp.path()).unwrap(); + 
assert!(path.exists()); + + let loaded = DeploymentLock::load(tmp.path()).unwrap().unwrap(); + assert_eq!(loaded.server_ip, lock.server_ip); + assert_eq!(loaded.deployment_id, lock.deployment_id); + assert_eq!(loaded.project_id, lock.project_id); + assert_eq!(loaded.server_name, lock.server_name); + assert_eq!(loaded.target, "cloud"); + } + + #[test] + fn load_returns_none_when_missing() { + let tmp = TempDir::new().unwrap(); + let result = DeploymentLock::load(tmp.path()).unwrap(); + assert!(result.is_none()); + } + + #[test] + fn exists_detection() { + let tmp = TempDir::new().unwrap(); + assert!(!DeploymentLock::exists(tmp.path())); + + sample_lock().save(tmp.path()).unwrap(); + assert!(DeploymentLock::exists(tmp.path())); + } + + #[test] + fn apply_to_config_sets_server_section() { + let lock = sample_lock(); + let mut config = StackerConfig::default(); + + lock.apply_to_config(&mut config); + + let server = config.deploy.server.unwrap(); + assert_eq!(server.host, "203.0.113.42"); + assert_eq!(server.user, "root"); + assert_eq!(server.port, 22); + } + + #[test] + fn apply_to_config_skips_local() { + let lock = DeploymentLock::for_local(); + let mut config = StackerConfig::default(); + + lock.apply_to_config(&mut config); + + assert!(config.deploy.server.is_none()); + } + + #[test] + fn for_server_captures_config() { + let server_cfg = ServerConfig { + host: "10.0.0.1".to_string(), + user: "deploy".to_string(), + ssh_key: None, + port: 2222, + }; + + let lock = DeploymentLock::for_server(&server_cfg); + assert_eq!(lock.server_ip, Some("10.0.0.1".to_string())); + assert_eq!(lock.ssh_user, Some("deploy".to_string())); + assert_eq!(lock.ssh_port, Some(2222)); + assert_eq!(lock.target, "server"); + } + + #[test] + fn with_server_info_enriches_lock() { + let lock = DeploymentLock::from_result(&DeployResult { + target: DeployTarget::Cloud, + message: "deployed".to_string(), + server_ip: None, + deployment_id: Some(1), + project_id: Some(2), + }); + + let enriched = 
lock.with_server_info( + Some("1.2.3.4".to_string()), + Some("ubuntu".to_string()), + Some(22), + Some("prod-01".to_string()), + Some(99), + ); + + assert_eq!(enriched.server_ip, Some("1.2.3.4".to_string())); + assert_eq!(enriched.ssh_user, Some("ubuntu".to_string())); + assert_eq!(enriched.server_name, Some("prod-01".to_string())); + assert_eq!(enriched.cloud_id, Some(99)); + } +} diff --git a/src/cli/install_runner.rs b/src/cli/install_runner.rs index b1e2330a..bcbc99da 100644 --- a/src/cli/install_runner.rs +++ b/src/cli/install_runner.rs @@ -93,6 +93,7 @@ pub struct DeployContext { /// Remote deploy overrides from CLI flags. pub project_name_override: Option, pub key_name_override: Option, + pub key_id_override: Option, pub server_name_override: Option, } @@ -491,64 +492,94 @@ impl DeployStrategy for CloudDeploy { }; // Step 2: Resolve cloud credentials - let cloud_id = if let Some(key_ref) = &key_name { - // Look up saved cloud by provider name - eprintln!(" Looking up saved cloud key '{}'...", key_ref); - match client.find_cloud_by_provider(key_ref).await? { + let cloud_id = if let Some(cid) = context.key_id_override { + // --key-id flag: look up by ID (server checks ownership) + eprintln!(" Looking up cloud credentials by id={}...", cid); + match client.get_cloud(cid).await? { Some(c) => { eprintln!( - " Found cloud credentials (id={}, provider={})", - c.id, c.provider + " Found cloud credentials (id={}, name='{}', provider={})", + c.id, c.name, c.provider ); Some(c.id) } None => { - // Try saving current env-var creds under this provider - let provider_str = cloud_cfg.provider.to_string(); - let provider_code = provider_code_for_remote( - &provider_str, + return Err(CliError::DeployFailed { + target: DeployTarget::Cloud, + reason: format!( + "Cloud credential id={} not found (or not owned by you). 
Use `stacker list clouds` to see available credentials.", + cid + ), + }); + } + } + } else if let Some(key_ref) = &key_name { + // --key flag: look up by name first, fall back to provider match + eprintln!(" Looking up saved cloud key '{}'...", key_ref); + match client.find_cloud_by_name(key_ref).await? { + Some(c) => { + eprintln!( + " Found cloud credentials (id={}, name='{}', provider={})", + c.id, c.name, c.provider ); - let env_creds = - resolve_remote_cloud_credentials(provider_code); - let cloud_token = env_creds - .get("cloud_token") - .and_then(|v| v.as_str()); - let cloud_key = env_creds - .get("cloud_key") - .and_then(|v| v.as_str()); - let cloud_secret = env_creds - .get("cloud_secret") - .and_then(|v| v.as_str()); - - if cloud_token.is_some() - || cloud_key.is_some() - || cloud_secret.is_some() - { + Some(c.id) + } + None => match client.find_cloud_by_provider(key_ref).await? { + Some(c) => { eprintln!( - " No saved cloud '{}', saving from env vars...", - key_ref + " Found cloud credentials by provider (id={}, name='{}', provider={})", + c.id, c.name, c.provider ); - let saved = client - .save_cloud( - provider_code, - cloud_token, - cloud_key, - cloud_secret, - ) - .await?; - eprintln!( - " Saved cloud credentials (id={})", - saved.id + Some(c.id) + } + None => { + // Try saving current env-var creds under this provider + let provider_str = cloud_cfg.provider.to_string(); + let provider_code = provider_code_for_remote( + &provider_str, ); - Some(saved.id) - } else { - return Err(CliError::DeployFailed { - target: DeployTarget::Cloud, - reason: format!( - "Cloud key '{}' not found on server and no cloud credentials in env vars (STACKER_CLOUD_TOKEN, HCLOUD_TOKEN, etc.)", + let env_creds = + resolve_remote_cloud_credentials(provider_code); + let cloud_token = env_creds + .get("cloud_token") + .and_then(|v| v.as_str()); + let cloud_key = env_creds + .get("cloud_key") + .and_then(|v| v.as_str()); + let cloud_secret = env_creds + .get("cloud_secret") + 
.and_then(|v| v.as_str()); + + if cloud_token.is_some() + || cloud_key.is_some() + || cloud_secret.is_some() + { + eprintln!( + " No saved cloud '{}', saving from env vars...", key_ref - ), - }); + ); + let saved = client + .save_cloud( + provider_code, + cloud_token, + cloud_key, + cloud_secret, + ) + .await?; + eprintln!( + " Saved cloud credentials (id={})", + saved.id + ); + Some(saved.id) + } else { + return Err(CliError::DeployFailed { + target: DeployTarget::Cloud, + reason: format!( + "Cloud key '{}' not found on server and no cloud credentials in env vars (STACKER_CLOUD_TOKEN, HCLOUD_TOKEN, etc.)", + key_ref + ), + }); + } } } } @@ -1472,6 +1503,7 @@ mod tests { image: None, project_name_override: None, key_name_override: None, + key_id_override: None, server_name_override: None, } } @@ -1573,6 +1605,7 @@ mod tests { image: Some("mycompany/install:v3".to_string()), project_name_override: None, key_name_override: None, + key_id_override: None, server_name_override: None, }; assert_eq!(ctx.install_image(), "mycompany/install:v3"); diff --git a/src/cli/mod.rs b/src/cli/mod.rs index da21a1a2..5a613c0d 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -3,6 +3,7 @@ pub mod ai_scanner; pub mod ci_export; pub mod config_parser; pub mod credentials; +pub mod deployment_lock; pub mod detector; pub mod error; pub mod generator; diff --git a/src/cli/service_catalog.rs b/src/cli/service_catalog.rs index a931617c..b1b2d865 100644 --- a/src/cli/service_catalog.rs +++ b/src/cli/service_catalog.rs @@ -137,13 +137,42 @@ impl ServiceCatalog { match lower.as_str() { "wp" | "wordpress" => "wordpress".to_string(), "pg" | "postgresql" | "postgres" => "postgres".to_string(), - "my" | "mysql" | "mariadb" => "mysql".to_string(), + "my" | "mysql" => "mysql".to_string(), + "maria" | "mariadb" => "mariadb".to_string(), "mongo" | "mongodb" => "mongodb".to_string(), "es" | "elastic" | "elasticsearch" => "elasticsearch".to_string(), "mq" | "rabbit" | "rabbitmq" => 
"rabbitmq".to_string(), "npm" | "nginx-proxy-manager" => "nginx_proxy_manager".to_string(), "pma" | "phpmyadmin" => "phpmyadmin".to_string(), "mh" | "mailhog" => "mailhog".to_string(), + "rc" | "rocketchat" | "rocket.chat" | "rocket-chat" => "rocketchat".to_string(), + "mm" | "mattermost" => "mattermost".to_string(), + "gl" | "gitlab" | "gitlab-ce" | "gitlab_ce" => "gitlab_ce".to_string(), + "wg" | "wireguard" => "wireguard".to_string(), + "vpn" | "openvpn" => "openvpn".to_string(), + "n8n" => "n8n".to_string(), + "dify" => "dify".to_string(), + "ollama" => "ollama".to_string(), + "owui" | "openwebui" | "open-webui" => "openwebui".to_string(), + "vault" => "vault".to_string(), + "dk" | "dockge" => "dockge".to_string(), + "od" | "odoo" => "odoo".to_string(), + "sc" | "suitecrm" => "suitecrm".to_string(), + "rm" | "redmine" => "redmine".to_string(), + "op" | "openproject" => "openproject".to_string(), + "jk" | "jenkins" => "jenkins".to_string(), + "af" | "airflow" => "airflow".to_string(), + "fa" | "fastapi" => "fastapi".to_string(), + "fl" | "flask" => "flask".to_string(), + "dj" | "django" => "django".to_string(), + "lv" | "laravel" => "laravel".to_string(), + "sf" | "symfony" => "symfony".to_string(), + "gin" => "gin".to_string(), + "ror" | "rails" | "rorrestful" => "rorrestful".to_string(), + "wz" | "wazuh" => "wazuh".to_string(), + "f2b" | "fail2ban" => "fail2ban".to_string(), + "nd" | "netdata" => "netdata".to_string(), + "pr" | "postgrest" => "postgrest".to_string(), _ => lower.replace('-', "_"), } } diff --git a/src/cli/stacker_client.rs b/src/cli/stacker_client.rs index b681996a..55ba6eb0 100644 --- a/src/cli/stacker_client.rs +++ b/src/cli/stacker_client.rs @@ -46,6 +46,8 @@ pub struct ProjectInfo { pub struct CloudInfo { pub id: i32, pub user_id: String, + #[serde(default)] + pub name: String, pub provider: String, pub cloud_token: Option, pub cloud_key: Option, @@ -384,6 +386,16 @@ impl StackerClient { Ok(clouds.into_iter().find(|c| 
c.provider.to_lowercase() == lower)) } + /// Find saved cloud credentials by name (e.g. "my-hetzner", "htz-4"). + pub async fn find_cloud_by_name( + &self, + name: &str, + ) -> Result<Option<CloudInfo>, CliError> { + let clouds = self.list_clouds().await?; + let lower = name.to_lowercase(); + Ok(clouds.into_iter().find(|c| c.name.to_lowercase() == lower)) + } + /// Find saved cloud credentials by ID. pub async fn get_cloud(&self, cloud_id: i32) -> Result<Option<CloudInfo>, CliError> { let url = format!("{}/cloud/{}", self.base_url, cloud_id); @@ -431,6 +443,18 @@ impl StackerClient { cloud_token: Option<&str>, cloud_key: Option<&str>, cloud_secret: Option<&str>, + ) -> Result<CloudInfo, CliError> { + self.save_cloud_with_name(provider, None, cloud_token, cloud_key, cloud_secret).await + } + + /// Save cloud credentials with an optional name. + pub async fn save_cloud_with_name( + &self, + provider: &str, + name: Option<&str>, + cloud_token: Option<&str>, + cloud_key: Option<&str>, + cloud_secret: Option<&str>, ) -> Result<CloudInfo, CliError> { let url = format!("{}/cloud", self.base_url); @@ -440,6 +464,12 @@ impl StackerClient { if let Some(obj) = payload.as_object_mut() { + if let Some(n) = name { + obj.insert( + "name".to_string(), + serde_json::Value::String(n.to_string()), + ); + } if let Some(t) = cloud_token { obj.insert( "cloud_token".to_string(), @@ -1193,8 +1223,15 @@ pub fn build_project_body(config: &StackerConfig) -> serde_json::Value { "type": "feature", "restart": "always", "custom": true, - "shared_ports": [], + "shared_ports": [ + {"host_port": "80", "container_port": "80"}, + {"host_port": "443", "container_port": "443"}, + {"host_port": "81", "container_port": "81"}, + ], "network": [], + "dockerhub_user": "jc21", + "dockerhub_name": "nginx-proxy-manager", + "dockerhub_tag": "latest", })); } _ => {} @@ -1456,6 +1493,13 @@ mod tests { assert_eq!(features[0]["restart"], "always"); assert!(features[0]["_id"].is_string(), "_id must be present"); assert_eq!(features[0]["custom"], true); + // Image fields must be present so
the install service can build a Docker image reference + assert_eq!(features[0]["dockerhub_user"], "jc21"); + assert_eq!(features[0]["dockerhub_name"], "nginx-proxy-manager"); + assert_eq!(features[0]["dockerhub_tag"], "latest"); + // Ports should include the NPM management port (81) + let ports = features[0]["shared_ports"].as_array().unwrap(); + assert_eq!(ports.len(), 3); } #[test] diff --git a/src/console/commands/cli/config.rs b/src/console/commands/cli/config.rs index 98bbf44c..f6955629 100644 --- a/src/console/commands/cli/config.rs +++ b/src/console/commands/cli/config.rs @@ -4,6 +4,7 @@ use std::io::{self, Write}; use crate::cli::config_parser::{ CloudConfig, CloudOrchestrator, CloudProvider, DeployTarget, ServerConfig, StackerConfig, }; +use crate::cli::deployment_lock::DeploymentLock; use crate::cli::error::CliError; use crate::console::commands::cli::init::full_config_reference_example; use crate::console::commands::CallableTrait; @@ -793,6 +794,134 @@ impl CallableTrait for ConfigExampleCommand { } } +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// `stacker config lock` / `stacker config unlock` +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +/// `stacker config lock [--file stacker.yml]` +/// +/// Reads `.stacker/deployment.lock` and writes the server details +/// (host, user, port, ssh_key) into stacker.yml's `deploy.server` section. +/// Next deploy will auto-detect the server and redeploy via SSH. +pub struct ConfigLockCommand { + pub file: Option, +} + +impl ConfigLockCommand { + pub fn new(file: Option) -> Self { + Self { file } + } +} + +impl CallableTrait for ConfigLockCommand { + fn call(&self) -> Result<(), Box> { + let project_dir = std::env::current_dir()?; + let config_path_str = resolve_config_path(&self.file); + let config_path = project_dir.join(&config_path_str); + + // 1. Load lockfile + let lock = match DeploymentLock::load(&project_dir)? 
{ + Some(l) => l, + None => { + eprintln!("No deployment lock found (.stacker/deployment.lock)."); + eprintln!("Deploy first with `stacker deploy`, then run this command."); + return Ok(()); + } + }; + + // 2. Check it has usable server details + match lock.server_ip.as_deref() { + Some("127.0.0.1") | None => { + eprintln!("Deployment lock exists but has no remote server details."); + if lock.target == "cloud" { + eprintln!("The cloud deployment may still be provisioning."); + eprintln!("Wait for it to complete, then run `stacker deploy --lock` to retry."); + } + return Ok(()); + } + _ => {} + } + + // 3. Load stacker.yml, apply lock, write back + if !config_path.exists() { + return Err(Box::new(CliError::ConfigNotFound { + path: config_path, + })); + } + + let mut config = StackerConfig::from_file(&config_path)?; + lock.apply_to_config(&mut config); + + DeploymentLock::write_config(&config, &config_path)?; + + let ip = lock.server_ip.as_deref().unwrap_or("?"); + let user = lock.ssh_user.as_deref().unwrap_or("root"); + let port = lock.ssh_port.unwrap_or(22); + + eprintln!("✓ stacker.yml updated with server details:"); + eprintln!(" deploy.server.host: {}", ip); + eprintln!(" deploy.server.user: {}", user); + eprintln!(" deploy.server.port: {}", port); + eprintln!(" Backup: {}.bak", config_path_str); + eprintln!(); + eprintln!("Next `stacker deploy` will target this server directly."); + + Ok(()) + } +} + +/// `stacker config unlock [--file stacker.yml]` +/// +/// Removes the `deploy.server` section from stacker.yml, allowing a fresh +/// cloud provision on the next deploy. 
+pub struct ConfigUnlockCommand { + pub file: Option, +} + +impl ConfigUnlockCommand { + pub fn new(file: Option) -> Self { + Self { file } + } +} + +impl CallableTrait for ConfigUnlockCommand { + fn call(&self) -> Result<(), Box> { + let project_dir = std::env::current_dir()?; + let config_path_str = resolve_config_path(&self.file); + let config_path = project_dir.join(&config_path_str); + + if !config_path.exists() { + return Err(Box::new(CliError::ConfigNotFound { + path: config_path, + })); + } + + let mut config = StackerConfig::from_file(&config_path)?; + + if config.deploy.server.is_none() { + eprintln!("No deploy.server section found in stacker.yml — nothing to unlock."); + return Ok(()); + } + + let old_host = config + .deploy + .server + .as_ref() + .map(|s| s.host.clone()) + .unwrap_or_default(); + + config.deploy.server = None; + + DeploymentLock::write_config(&config, &config_path)?; + + eprintln!("✓ Removed deploy.server section (was: host={})", old_host); + eprintln!(" Backup: {}.bak", config_path_str); + eprintln!(" Next `stacker deploy --target cloud` will provision a new server."); + + Ok(()) + } +} + // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ #[cfg(test)] diff --git a/src/console/commands/cli/deploy.rs b/src/console/commands/cli/deploy.rs index 6248642a..1ddf0186 100644 --- a/src/console/commands/cli/deploy.rs +++ b/src/console/commands/cli/deploy.rs @@ -7,6 +7,7 @@ use crate::cli::ai_client::{ }; use crate::cli::config_parser::{AiProviderType, DeployTarget, ServerConfig, StackerConfig}; use crate::cli::credentials::CredentialsManager; +use crate::cli::deployment_lock::DeploymentLock; use crate::cli::error::CliError; use crate::cli::generator::compose::ComposeDefinition; use crate::cli::generator::dockerfile::DockerfileBuilder; @@ -621,11 +622,17 @@ pub struct DeployCommand { pub project_name: Option, /// Override cloud key name (--key flag) pub key_name: Option, + /// Override cloud key by ID (--key-id flag) + pub key_id: Option, 
/// Override server name (--server flag) pub server_name: Option, /// Watch deployment progress until complete (--watch / --no-watch). /// `None` means "auto" (watch for cloud, health-check for local). pub watch: Option, + /// Persist server details into stacker.yml after deploy (--lock). + pub lock: bool, + /// Skip smart server pre-check and lockfile hints; force fresh cloud provision (--force-new). + pub force_new: bool, } impl DeployCommand { @@ -642,8 +649,11 @@ impl DeployCommand { force_rebuild, project_name: None, key_name: None, + key_id: None, server_name: None, watch: None, + lock: false, + force_new: false, } } @@ -660,6 +670,12 @@ impl DeployCommand { self } + /// Builder method to set cloud key ID from CLI `--key-id` flag. + pub fn with_key_id(mut self, key_id: Option) -> Self { + self.key_id = key_id; + self + } + /// Builder method to set watch behaviour. /// `--watch` forces watch on; `--no-watch` forces it off. /// Neither flag → auto (cloud=watch, local=health-check). @@ -672,6 +688,18 @@ impl DeployCommand { // else remains None → auto self } + + /// Builder method to set lock behaviour (--lock flag). + pub fn with_lock(mut self, lock: bool) -> Self { + self.lock = lock; + self + } + + /// Builder method to set force-new behaviour (--force-new flag). + pub fn with_force_new(mut self, force_new: bool) -> Self { + self.force_new = force_new; + self + } } /// Parse a deploy target string into `DeployTarget`. @@ -690,6 +718,7 @@ fn parse_deploy_target(s: &str) -> Result { pub struct RemoteDeployOverrides { pub project_name: Option, pub key_name: Option, + pub key_id: Option, pub server_name: Option, } @@ -702,6 +731,7 @@ pub fn run_deploy( target_override: Option<&str>, dry_run: bool, force_rebuild: bool, + force_new: bool, executor: &dyn CommandExecutor, remote_overrides: &RemoteDeployOverrides, ) -> Result { @@ -724,7 +754,8 @@ pub fn run_deploy( // is defined with a host, try SSH connectivity first. 
// If the server is reachable, automatically switch to Server target. // If not, show diagnostics and abort so the user can fix or remove the section. - if deploy_target == DeployTarget::Cloud { + // Skipped when --force-new is set (user explicitly wants a fresh cloud provision). + if deploy_target == DeployTarget::Cloud && !force_new { if let Some(ref server_cfg) = config.deploy.server { eprintln!(" Found deploy.server section (host={}). Checking SSH connectivity...", server_cfg.host); @@ -778,6 +809,18 @@ pub fn run_deploy( }); } } + } else if DeploymentLock::exists(project_dir) { + // No deploy.server in config, but a lockfile exists from a prior deploy. + // Inform the user without auto-switching — they must opt in. + if let Ok(Some(lock)) = DeploymentLock::load(project_dir) { + if let Some(ref ip) = lock.server_ip { + if ip != "127.0.0.1" { + eprintln!(" ℹ Found previous deployment to {} (from .stacker/deployment.lock)", ip); + eprintln!(" To redeploy to the same server, run: stacker config lock"); + eprintln!(" To provision a new server instead: stacker deploy --force-new"); + } + } + } } } @@ -880,6 +923,7 @@ pub fn run_deploy( .and_then(|cloud| cloud.install_image.clone()), project_name_override: remote_overrides.project_name.clone(), key_name_override: remote_overrides.key_name.clone(), + key_id_override: remote_overrides.key_id, server_name_override: remote_overrides.server_name.clone(), }; @@ -897,6 +941,7 @@ impl CallableTrait for DeployCommand { let remote_overrides = RemoteDeployOverrides { project_name: self.project_name.clone(), key_name: self.key_name.clone(), + key_id: self.key_id, server_name: self.server_name.clone(), }; @@ -909,6 +954,7 @@ impl CallableTrait for DeployCommand { self.target.as_deref(), self.dry_run, self.force_rebuild, + self.force_new, &executor, &remote_overrides, ); @@ -957,10 +1003,166 @@ impl CallableTrait for DeployCommand { _ => {} } + // ── Deployment lock: persist deployment context ── + 
self.save_deployment_lock(&project_dir, &result)?; + Ok(()) } } +impl DeployCommand { + /// Save deployment context to `.stacker/deployment.lock` after a successful deploy. + /// + /// For cloud deploys, tries to fetch the provisioned server's details from the + /// Stacker API (IP, SSH user/port, server name) so that subsequent deploys can + /// target the same server via the smart pre-check. + /// + /// When `--lock` is set, also writes the server details into `stacker.yml`. + fn save_deployment_lock( + &self, + project_dir: &Path, + result: &DeployResult, + ) -> Result<(), Box> { + // Build the initial lock from the deploy result + let mut lock = match result.target { + DeployTarget::Local => DeploymentLock::for_local(), + DeployTarget::Server => { + // For server deploys, read server config from stacker.yml + let config_path = match &self.file { + Some(f) => project_dir.join(f), + None => project_dir.join(DEFAULT_CONFIG_FILE), + }; + if let Ok(config) = StackerConfig::from_file(&config_path) { + if let Some(ref server_cfg) = config.deploy.server { + DeploymentLock::for_server(server_cfg) + } else { + DeploymentLock::from_result(result) + } + } else { + DeploymentLock::from_result(result) + } + } + DeployTarget::Cloud => { + let mut l = DeploymentLock::from_result(result) + .with_project_name(self.project_name.clone()); + + // Try to fetch provisioned server details from the Stacker API + if let Some(project_id) = result.project_id { + match fetch_server_for_project(project_id as i32) { + Ok(Some(info)) => { + l = l.with_server_info( + info.srv_ip.clone(), + info.ssh_user.clone(), + info.ssh_port.map(|p| p as u16), + info.name.clone(), + info.cloud_id, + ); + if let Some(ref ip) = info.srv_ip { + eprintln!(" Server details: {} ({}@{}:{})", + info.name.as_deref().unwrap_or("unnamed"), + info.ssh_user.as_deref().unwrap_or("root"), + ip, + info.ssh_port.unwrap_or(22), + ); + } + } + Ok(None) => { + eprintln!(" ℹ Server details not yet available (may still be 
provisioning)."); + } + Err(e) => { + eprintln!(" ⚠ Could not fetch server details: {}", e); + } + } + } + + l + } + }; + + // Always set project_name if available from CLI flag + if self.project_name.is_some() { + lock = lock.with_project_name(self.project_name.clone()); + } + + // Save lockfile + match lock.save(project_dir) { + Ok(path) => { + eprintln!(" Deployment context saved to {}", path.display()); + } + Err(e) => { + eprintln!(" ⚠ Failed to save deployment lock: {}", e); + } + } + + // If --lock flag is set, also update stacker.yml with server details + if self.lock { + let config_path = match &self.file { + Some(f) => project_dir.join(f), + None => project_dir.join(DEFAULT_CONFIG_FILE), + }; + + if lock.server_ip.is_some() + && lock.server_ip.as_deref() != Some("127.0.0.1") + { + match StackerConfig::from_file(&config_path) { + Ok(mut config) => { + lock.apply_to_config(&mut config); + match DeploymentLock::write_config(&config, &config_path) { + Ok(()) => { + eprintln!(" ✓ stacker.yml updated with server details (backup: stacker.yml.bak)"); + eprintln!(" Next deploy will target this server directly."); + } + Err(e) => { + eprintln!(" ⚠ Failed to update stacker.yml: {}", e); + eprintln!(" Run `stacker config lock` to retry."); + } + } + } + Err(e) => { + eprintln!(" ⚠ Failed to read stacker.yml for update: {}", e); + } + } + } else { + eprintln!(" ℹ --lock: No remote server details to persist (local deploy or server IP not yet available)."); + eprintln!(" Run `stacker config lock` after the server is provisioned."); + } + } + + Ok(()) + } +} + +// ── Fetch server details from Stacker API by project ID ── + +/// After a cloud deploy completes, look up the provisioned server's details +/// (IP, SSH user, port, name) from the Stacker server API. 
+fn fetch_server_for_project( + project_id: i32, +) -> Result, Box> { + let cred_manager = CredentialsManager::with_default_store(); + let creds = cred_manager.require_valid_token("server lookup")?; + + let base_url = crate::cli::install_runner::normalize_stacker_server_url( + stacker_client::DEFAULT_STACKER_URL, + ); + + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build()?; + + rt.block_on(async { + let client = StackerClient::new(&base_url, &creds.access_token); + let servers = client.list_servers().await?; + + // Find the server linked to this project + let server = servers + .into_iter() + .find(|s| s.project_id == project_id && s.srv_ip.is_some()); + + Ok(server) + }) +} + // ── Local container health-check after `docker compose up` ─── /// Poll `docker compose ps` until all containers are running/healthy @@ -1285,7 +1487,7 @@ mod tests { ]); let executor = MockExecutor::success(); - let result = run_deploy(dir.path(), None, Some("local"), true, false, &executor, &RemoteDeployOverrides::default()); + let result = run_deploy(dir.path(), None, Some("local"), true, false, false, &executor, &RemoteDeployOverrides::default()); assert!(result.is_ok()); // Generated files should exist @@ -1303,7 +1505,7 @@ mod tests { ]); let executor = MockExecutor::success(); - let result = run_deploy(dir.path(), None, Some("local"), true, false, &executor, &RemoteDeployOverrides::default()); + let result = run_deploy(dir.path(), None, Some("local"), true, false, false, &executor, &RemoteDeployOverrides::default()); assert!(result.is_ok()); // Custom Dockerfile should not be overwritten @@ -1324,7 +1526,7 @@ mod tests { ]); let executor = MockExecutor::success(); - let result = run_deploy(dir.path(), None, Some("local"), true, false, &executor, &RemoteDeployOverrides::default()); + let result = run_deploy(dir.path(), None, Some("local"), true, false, false, &executor, &RemoteDeployOverrides::default()); assert!(result.is_ok()); // 
.stacker/docker-compose.yml should NOT be generated @@ -1341,7 +1543,7 @@ mod tests { ]); let executor = MockExecutor::success(); - let result = run_deploy(dir.path(), None, Some("local"), true, false, &executor, &RemoteDeployOverrides::default()); + let result = run_deploy(dir.path(), None, Some("local"), true, false, false, &executor, &RemoteDeployOverrides::default()); assert!(result.is_ok()); } @@ -1353,7 +1555,7 @@ mod tests { ]); let executor = MockExecutor::success(); - let result = run_deploy(dir.path(), None, Some("local"), true, false, &executor, &RemoteDeployOverrides::default()); + let result = run_deploy(dir.path(), None, Some("local"), true, false, false, &executor, &RemoteDeployOverrides::default()); assert!(result.is_ok()); // No Dockerfile should be generated (using image) @@ -1367,7 +1569,7 @@ mod tests { ]); let executor = MockExecutor::success(); - let result = run_deploy(dir.path(), None, None, true, false, &executor, &RemoteDeployOverrides::default()); + let result = run_deploy(dir.path(), None, None, true, false, false, &executor, &RemoteDeployOverrides::default()); assert!(result.is_err()); let err = format!("{}", result.unwrap_err()); @@ -1388,7 +1590,7 @@ mod tests { let executor = MockExecutor::success(); // This should fail at validation since no credentials exist - let result = run_deploy(dir.path(), None, None, true, false, &executor, &RemoteDeployOverrides::default()); + let result = run_deploy(dir.path(), None, None, true, false, false, &executor, &RemoteDeployOverrides::default()); assert!(result.is_err()); } @@ -1400,7 +1602,7 @@ mod tests { ]); let executor = MockExecutor::success(); - let result = run_deploy(dir.path(), None, None, true, false, &executor, &RemoteDeployOverrides::default()); + let result = run_deploy(dir.path(), None, None, true, false, false, &executor, &RemoteDeployOverrides::default()); assert!(result.is_err()); let err = format!("{}", result.unwrap_err()); @@ -1413,7 +1615,7 @@ mod tests { let dir = 
TempDir::new().unwrap(); let executor = MockExecutor::success(); - let result = run_deploy(dir.path(), None, None, true, false, &executor, &RemoteDeployOverrides::default()); + let result = run_deploy(dir.path(), None, None, true, false, false, &executor, &RemoteDeployOverrides::default()); assert!(result.is_err()); let err = format!("{}", result.unwrap_err()); @@ -1429,7 +1631,7 @@ mod tests { ]); let executor = MockExecutor::success(); - let result = run_deploy(dir.path(), Some("custom.yml"), Some("local"), true, false, &executor, &RemoteDeployOverrides::default()); + let result = run_deploy(dir.path(), Some("custom.yml"), Some("local"), true, false, false, &executor, &RemoteDeployOverrides::default()); assert!(result.is_ok()); } @@ -1442,15 +1644,15 @@ mod tests { let executor = MockExecutor::success(); // First deploy creates files - let result = run_deploy(dir.path(), None, Some("local"), true, false, &executor, &RemoteDeployOverrides::default()); + let result = run_deploy(dir.path(), None, Some("local"), true, false, false, &executor, &RemoteDeployOverrides::default()); assert!(result.is_ok()); // Second deploy without force_rebuild should succeed (reuses existing files) - let result2 = run_deploy(dir.path(), None, Some("local"), true, false, &executor, &RemoteDeployOverrides::default()); + let result2 = run_deploy(dir.path(), None, Some("local"), true, false, false, &executor, &RemoteDeployOverrides::default()); assert!(result2.is_ok()); // With force_rebuild should also succeed (regenerates files) - let result3 = run_deploy(dir.path(), None, Some("local"), true, true, &executor, &RemoteDeployOverrides::default()); + let result3 = run_deploy(dir.path(), None, Some("local"), true, true, false, &executor, &RemoteDeployOverrides::default()); assert!(result3.is_ok()); } @@ -1483,7 +1685,7 @@ mod tests { let executor = MockExecutor::success(); // Dry-run should succeed (hooks are just noted, not executed in dry-run) - let result = run_deploy(dir.path(), None, 
Some("local"), true, false, &executor, &RemoteDeployOverrides::default()); + let result = run_deploy(dir.path(), None, Some("local"), true, false, false, &executor, &RemoteDeployOverrides::default()); assert!(result.is_ok()); } diff --git a/src/console/commands/cli/list.rs b/src/console/commands/cli/list.rs index e847cce9..898021a3 100644 --- a/src/console/commands/cli/list.rs +++ b/src/console/commands/cli/list.rs @@ -247,3 +247,92 @@ fn truncate(s: &str, max_len: usize) -> String { s.to_string() } } + +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +// list clouds +// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +/// `stacker list clouds [--json]` +/// +/// Lists all saved cloud credentials for the authenticated user. +/// Shows ID, name, and provider. Tokens are masked for security. +pub struct ListCloudsCommand { + pub json: bool, +} + +impl ListCloudsCommand { + pub fn new(json: bool) -> Self { + Self { json } + } +} + +impl CallableTrait for ListCloudsCommand { + fn call(&self) -> Result<(), Box> { + let json = self.json; + + let cred_manager = CredentialsManager::with_default_store(); + let creds = cred_manager.require_valid_token("list clouds")?; + let base_url = stacker_client::DEFAULT_STACKER_URL.to_string(); + + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .map_err(|e| CliError::ConfigValidation(format!("Failed to create async runtime: {}", e)))?; + + rt.block_on(async { + let client = StackerClient::new(&base_url, &creds.access_token); + let clouds = client.list_clouds().await?; + + if clouds.is_empty() { + eprintln!("No saved cloud credentials found."); + eprintln!("Cloud credentials are saved automatically when you deploy with env vars,"); + eprintln!("or via: stacker deploy --target cloud (with HCLOUD_TOKEN, etc. 
exported)."); + return Ok(()); + } + + if json { + // Mask sensitive fields for JSON output + let safe: Vec = clouds + .iter() + .map(|c| { + serde_json::json!({ + "id": c.id, + "name": c.name, + "provider": c.provider, + "has_token": c.cloud_token.is_some(), + "has_key": c.cloud_key.is_some(), + "has_secret": c.cloud_secret.is_some(), + }) + }) + .collect(); + println!("{}", serde_json::to_string_pretty(&safe)?); + } else { + println!( + "{:<6} {:<24} {:<12} {:<10} {:<10} {:<10}", + "ID", "NAME", "PROVIDER", "TOKEN", "KEY", "SECRET" + ); + println!("{}", "─".repeat(74)); + + for c in &clouds { + let has_token = if c.cloud_token.is_some() { "✓" } else { "-" }; + let has_key = if c.cloud_key.is_some() { "✓" } else { "-" }; + let secret_indicator = "*"; + println!( + "{:<6} {:<24} {:<12} {:<10} {:<10} {:<10}", + c.id, + truncate(&c.name, 22), + &c.provider, + has_token, + has_key, + secret_indicator, + ); + } + + eprintln!("\n{} cloud credential(s) total.", clouds.len()); + eprintln!("Use with: stacker deploy --key or --key-id "); + } + + Ok(()) + }) + } +} diff --git a/src/console/main.rs b/src/console/main.rs index 76f7f2d3..01dfadd4 100644 --- a/src/console/main.rs +++ b/src/console/main.rs @@ -133,6 +133,9 @@ enum StackerCommands { /// Name of saved cloud credential to reuse #[arg(long, value_name = "KEY_NAME")] key: Option, + /// ID of saved cloud credential to reuse + #[arg(long, value_name = "CLOUD_ID")] + key_id: Option, /// Name of saved server to reuse #[arg(long, value_name = "SERVER_NAME")] server: Option, @@ -209,6 +212,16 @@ enum StackerConfigCommands { #[arg(long, default_value_t = true)] interactive: bool, }, + /// Persist deployment lock into stacker.yml (writes deploy.server from last deploy) + Lock { + #[arg(long, value_name = "FILE")] + file: Option, + }, + /// Remove deploy.server section from stacker.yml (allows fresh cloud provision) + Unlock { + #[arg(long, value_name = "FILE")] + file: Option, + }, /// Guided setup helpers Setup { 
#[command(subcommand)] @@ -345,6 +358,7 @@ fn get_command(command: Commands) -> Result Ok(Box::new( stacker::console::commands::cli::deploy::DeployCommand::new( @@ -353,7 +367,8 @@ fn get_command(command: Commands) -> Result Result Ok(Box::new( stacker::console::commands::cli::config::ConfigFixCommand::new(file, interactive), )), + StackerConfigCommands::Lock { file } => Ok(Box::new( + stacker::console::commands::cli::config::ConfigLockCommand::new(file), + )), + StackerConfigCommands::Unlock { file } => Ok(Box::new( + stacker::console::commands::cli::config::ConfigUnlockCommand::new(file), + )), StackerConfigCommands::Setup { command } => match command { StackerConfigSetupCommands::Cloud { file } => Ok(Box::new( stacker::console::commands::cli::config::ConfigSetupCloudCommand::new(file), diff --git a/src/db/cloud.rs b/src/db/cloud.rs index 0e06f1b7..8e60c674 100644 --- a/src/db/cloud.rs +++ b/src/db/cloud.rs @@ -44,10 +44,21 @@ pub async fn fetch_by_user(pool: &PgPool, user_id: &str) -> Result Result { let query_span = tracing::info_span!("Saving user's cloud data into the database"); + + // If no name provided, we'll generate a default after insert (need the ID) + let has_name = !cloud.name.is_empty(); + let insert_name = if has_name { + cloud.name.clone() + } else { + // Temporary placeholder; will be updated below + format!("{}-0", cloud.provider) + }; + sqlx::query!( r#" INSERT INTO cloud ( user_id, + name, provider, cloud_token, cloud_key, @@ -56,10 +67,11 @@ pub async fn insert(pool: &PgPool, mut cloud: models::Cloud) -> Result Result Result { @@ -87,17 +106,19 @@ pub async fn update(pool: &PgPool, mut cloud: models::Cloud) -> Result, #[serde(skip_serializing_if = "Option::is_none")] pub project_id: Option, + /// Human-friendly name for this cloud credential (e.g. "my-hetzner"). + /// Auto-generated as "{provider}-{id}" if not provided. 
+ #[serde(default)] + pub name: Option, #[validate(min_length = 2)] #[validate(max_length = 50)] pub provider: String, @@ -130,6 +134,8 @@ impl Into for &CloudForm { let mut cloud = models::Cloud::default(); cloud.provider = self.provider.clone(); cloud.user_id = self.user_id.clone().unwrap(); + // Name will be set after insert if not provided (default: "{provider}-{id}") + cloud.name = self.name.clone().unwrap_or_default(); if Some(true) == self.save_token { let mut secret = Secret::new(); @@ -158,6 +164,7 @@ impl Into for models::Cloud { fn into(self) -> CloudForm { let mut form = CloudForm::default(); form.provider = self.provider.clone(); + form.name = Some(self.name.clone()); if Some(true) == self.save_token { let mut secret = Secret::new(); diff --git a/src/mcp/tools/cloud.rs b/src/mcp/tools/cloud.rs index 31c38556..32c12673 100644 --- a/src/mcp/tools/cloud.rs +++ b/src/mcp/tools/cloud.rs @@ -242,6 +242,7 @@ impl ToolHandler for AddCloudTool { let cloud = models::Cloud { id: 0, // Will be set by DB user_id: context.user.id.clone(), + name: String::new(), // auto-generated by db::cloud::insert as "{provider}-{id}" provider: args.provider.clone(), cloud_token: args.cloud_token, cloud_key: args.cloud_key, diff --git a/src/middleware/authentication/method/f_hmac.rs b/src/middleware/authentication/method/f_hmac.rs index f41aafdd..7e8ff810 100644 --- a/src/middleware/authentication/method/f_hmac.rs +++ b/src/middleware/authentication/method/f_hmac.rs @@ -97,8 +97,12 @@ pub async fn try_hmac(req: &mut ServiceRequest) -> Result { None => {} } + // Use "client" as the Casbin subject so it matches the Casbin policies + // (e.g. `p, client, /api/v1/agent/register, POST`). + // Previously this was `client_id.to_string()` which never matched any + // group mapping and caused 403 for all HMAC-authenticated requests. 
let accesscontrol_vals = actix_casbin_auth::CasbinVals { - subject: client_id.to_string(), + subject: "client".to_string(), domain: None, }; if req.extensions_mut().insert(accesscontrol_vals).is_some() { diff --git a/src/models/cloud.rs b/src/models/cloud.rs index e2bf986a..2108bc61 100644 --- a/src/models/cloud.rs +++ b/src/models/cloud.rs @@ -5,6 +5,7 @@ use serde_derive::{Deserialize, Serialize}; pub struct Cloud { pub id: i32, pub user_id: String, + pub name: String, pub provider: String, pub cloud_token: Option, pub cloud_key: Option, @@ -38,6 +39,7 @@ impl std::fmt::Display for Cloud { impl Cloud { pub fn new( user_id: String, + name: String, provider: String, cloud_token: Option, cloud_key: Option, @@ -47,6 +49,7 @@ impl Cloud { Self { id: 0, user_id, + name, provider, cloud_token, cloud_key, @@ -62,6 +65,7 @@ impl Default for Cloud { fn default() -> Self { Cloud { id: 0, + name: "".to_string(), provider: "".to_string(), user_id: "".to_string(), cloud_key: Default::default(), diff --git a/src/services/project_app_service.rs b/src/services/project_app_service.rs index 8ec8632c..b2ca341c 100644 --- a/src/services/project_app_service.rs +++ b/src/services/project_app_service.rs @@ -355,7 +355,6 @@ impl SyncSummary { #[cfg(test)] mod tests { - use super::*; use crate::models::ProjectApp; #[test]