diff --git a/.env b/.env index 465ce51e..837e8826 100644 --- a/.env +++ b/.env @@ -21,4 +21,8 @@ STACKER_AGENT_POLL_INTERVAL_SECS=2 # Deployment Settings # Base directory for deployments on target servers -DEFAULT_DEPLOY_DIR=/home/trydirect \ No newline at end of file +DEFAULT_DEPLOY_DIR=/home/trydirect + +# Webhook to User Service (marketplace approval flow) +URL_SERVER_USER=http://user:4100 +STACKER_SERVICE_TOKEN=changeme \ No newline at end of file diff --git a/.sqlx/query-fde61fd37b0e4c325e9dd4817a5ccc4ed8a4ffd2175fce842dd5d33545ba63f2.json b/.sqlx/query-09211b75cd521772b4a9ca806efa60d355d2479811e2bb55d4f6b8163c7ad724.json similarity index 89% rename from .sqlx/query-fde61fd37b0e4c325e9dd4817a5ccc4ed8a4ffd2175fce842dd5d33545ba63f2.json rename to .sqlx/query-09211b75cd521772b4a9ca806efa60d355d2479811e2bb55d4f6b8163c7ad724.json index 8a0765d1..dbd107d9 100644 --- a/.sqlx/query-fde61fd37b0e4c325e9dd4817a5ccc4ed8a4ffd2175fce842dd5d33545ba63f2.json +++ b/.sqlx/query-09211b75cd521772b4a9ca806efa60d355d2479811e2bb55d4f6b8163c7ad724.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO project_app (\n project_id, code, name, image, environment, ports, volumes,\n domain, ssl_enabled, resources, restart_policy, command,\n entrypoint, networks, depends_on, healthcheck, labels,\n config_files, template_source, enabled, deploy_order, parent_app_code, created_at, updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, NOW(), NOW())\n RETURNING *\n ", + "query": "\n INSERT INTO project_app (\n project_id, code, name, image, environment, ports, volumes,\n domain, ssl_enabled, resources, restart_policy, command,\n entrypoint, networks, depends_on, healthcheck, labels,\n config_files, template_source, enabled, deploy_order, parent_app_code,\n deployment_id, created_at, updated_at\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, NOW(), NOW())\n RETURNING *\n ", "describe": { "columns": [ { @@ -147,6 +147,11 @@ "ordinal": 28, "name": "parent_app_code", "type_info": "Varchar" + }, + { + "ordinal": 29, + "name": "deployment_id", + "type_info": "Int4" } ], "parameters": { @@ -172,7 +177,8 @@ "Varchar", "Bool", "Int4", - "Varchar" + "Varchar", + "Int4" ] }, "nullable": [ @@ -204,8 +210,9 @@ true, true, true, + true, true ] }, - "hash": "fde61fd37b0e4c325e9dd4817a5ccc4ed8a4ffd2175fce842dd5d33545ba63f2" + "hash": "09211b75cd521772b4a9ca806efa60d355d2479811e2bb55d4f6b8163c7ad724" } diff --git a/.sqlx/query-d81dbcf77d096403614b80165d66388884b133c79da6ed1a5809a3ca64f48f97.json b/.sqlx/query-1108f78f1238d79a63ed5872b40a61e5bf9278b220373771cecb87850002e58e.json similarity index 64% rename from .sqlx/query-d81dbcf77d096403614b80165d66388884b133c79da6ed1a5809a3ca64f48f97.json rename to .sqlx/query-1108f78f1238d79a63ed5872b40a61e5bf9278b220373771cecb87850002e58e.json index 769d0a5c..78373b6c 100644 --- a/.sqlx/query-d81dbcf77d096403614b80165d66388884b133c79da6ed1a5809a3ca64f48f97.json +++ b/.sqlx/query-1108f78f1238d79a63ed5872b40a61e5bf9278b220373771cecb87850002e58e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "UPDATE stack_template SET \n name = COALESCE($2, name),\n short_description = COALESCE($3, short_description),\n long_description = COALESCE($4, long_description),\n category_id = COALESCE((SELECT id FROM stack_category WHERE name = $5), category_id),\n tags = COALESCE($6, tags),\n tech_stack = COALESCE($7, tech_stack)\n WHERE id = 
$1::uuid", + "query": "UPDATE stack_template SET \n name = COALESCE($2, name),\n short_description = COALESCE($3, short_description),\n long_description = COALESCE($4, long_description),\n category_id = COALESCE((SELECT id FROM stack_category WHERE name = $5), category_id),\n tags = COALESCE($6, tags),\n tech_stack = COALESCE($7, tech_stack),\n price = COALESCE($8, price),\n billing_cycle = COALESCE($9, billing_cycle),\n currency = COALESCE($10, currency)\n WHERE id = $1::uuid", "describe": { "columns": [], "parameters": { @@ -11,10 +11,13 @@ "Text", "Text", "Jsonb", - "Jsonb" + "Jsonb", + "Float8", + "Varchar", + "Varchar" ] }, "nullable": [] }, - "hash": "d81dbcf77d096403614b80165d66388884b133c79da6ed1a5809a3ca64f48f97" + "hash": "1108f78f1238d79a63ed5872b40a61e5bf9278b220373771cecb87850002e58e" } diff --git a/.sqlx/query-e5956a76c15941c58fc9acb3886c9d8ed8688d70ac5fcceaf41e1671f75dbaa8.json b/.sqlx/query-184840fbb1e0b2fd96590d10ac17fdfa93456f28c3a62c4a1ac78bcf69d58b09.json similarity index 74% rename from .sqlx/query-e5956a76c15941c58fc9acb3886c9d8ed8688d70ac5fcceaf41e1671f75dbaa8.json rename to .sqlx/query-184840fbb1e0b2fd96590d10ac17fdfa93456f28c3a62c4a1ac78bcf69d58b09.json index ee20b465..7a55df12 100644 --- a/.sqlx/query-e5956a76c15941c58fc9acb3886c9d8ed8688d70ac5fcceaf41e1671f75dbaa8.json +++ b/.sqlx/query-184840fbb1e0b2fd96590d10ac17fdfa93456f28c3a62c4a1ac78bcf69d58b09.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "SELECT \n t.id,\n t.creator_user_id,\n t.creator_name,\n t.name,\n t.slug,\n t.short_description,\n t.long_description,\n c.name AS \"category_code?\",\n t.product_id,\n t.tags,\n t.tech_stack,\n t.status,\n t.is_configurable,\n t.view_count,\n t.deploy_count,\n t.required_plan_name,\n t.created_at,\n t.updated_at,\n t.approved_at\n FROM stack_template t\n LEFT JOIN stack_category c ON t.category_id = c.id\n WHERE t.status = 'submitted'\n ORDER BY t.created_at ASC", + "query": "SELECT \n t.id,\n t.creator_user_id,\n t.creator_name,\n t.name,\n t.slug,\n t.short_description,\n t.long_description,\n c.name AS \"category_code?\",\n t.product_id,\n t.tags,\n t.tech_stack,\n t.status,\n t.is_configurable,\n t.view_count,\n t.deploy_count,\n t.required_plan_name,\n t.price,\n t.billing_cycle,\n t.currency,\n t.created_at,\n t.updated_at,\n t.approved_at\n FROM stack_template t\n LEFT JOIN stack_category c ON t.category_id = c.id\n WHERE t.status IN ('submitted', 'approved')\n ORDER BY \n CASE t.status\n WHEN 'submitted' THEN 0\n WHEN 'approved' THEN 1\n END,\n t.created_at ASC", "describe": { "columns": [ { @@ -85,16 +85,31 @@ }, { "ordinal": 16, + "name": "price", + "type_info": "Float8" + }, + { + "ordinal": 17, + "name": "billing_cycle", + "type_info": "Varchar" + }, + { + "ordinal": 18, + "name": "currency", + "type_info": "Varchar" + }, + { + "ordinal": 19, "name": "created_at", "type_info": "Timestamptz" }, { - "ordinal": 17, + "ordinal": 20, "name": "updated_at", "type_info": "Timestamptz" }, { - "ordinal": 18, + "ordinal": 21, "name": "approved_at", "type_info": "Timestamptz" } @@ -121,8 +136,11 @@ true, true, true, + true, + true, + true, true ] }, - "hash": "e5956a76c15941c58fc9acb3886c9d8ed8688d70ac5fcceaf41e1671f75dbaa8" + "hash": "184840fbb1e0b2fd96590d10ac17fdfa93456f28c3a62c4a1ac78bcf69d58b09" } diff --git a/.sqlx/query-1ee7eb9b87cfcc6ba3d2bbc6351277ac4a7f94d9f0f448b5549e30fc6cc66e19.json b/.sqlx/query-1ee7eb9b87cfcc6ba3d2bbc6351277ac4a7f94d9f0f448b5549e30fc6cc66e19.json new file mode 100644 index 00000000..be92bbe8 --- /dev/null +++ 
b/.sqlx/query-1ee7eb9b87cfcc6ba3d2bbc6351277ac4a7f94d9f0f448b5549e30fc6cc66e19.json @@ -0,0 +1,197 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT * FROM project_app \n WHERE project_id = $1 AND deployment_id = $2\n ORDER BY deploy_order ASC NULLS LAST, id ASC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "project_id", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "code", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "image", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "environment", + "type_info": "Jsonb" + }, + { + "ordinal": 6, + "name": "ports", + "type_info": "Jsonb" + }, + { + "ordinal": 7, + "name": "volumes", + "type_info": "Jsonb" + }, + { + "ordinal": 8, + "name": "domain", + "type_info": "Varchar" + }, + { + "ordinal": 9, + "name": "ssl_enabled", + "type_info": "Bool" + }, + { + "ordinal": 10, + "name": "resources", + "type_info": "Jsonb" + }, + { + "ordinal": 11, + "name": "restart_policy", + "type_info": "Varchar" + }, + { + "ordinal": 12, + "name": "command", + "type_info": "Text" + }, + { + "ordinal": 13, + "name": "entrypoint", + "type_info": "Text" + }, + { + "ordinal": 14, + "name": "networks", + "type_info": "Jsonb" + }, + { + "ordinal": 15, + "name": "depends_on", + "type_info": "Jsonb" + }, + { + "ordinal": 16, + "name": "healthcheck", + "type_info": "Jsonb" + }, + { + "ordinal": 17, + "name": "labels", + "type_info": "Jsonb" + }, + { + "ordinal": 18, + "name": "enabled", + "type_info": "Bool" + }, + { + "ordinal": 19, + "name": "deploy_order", + "type_info": "Int4" + }, + { + "ordinal": 20, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 21, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 22, + "name": "config_version", + "type_info": "Int4" + }, + { + "ordinal": 23, + "name": "vault_synced_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 24, + "name": "vault_sync_version", + "type_info": "Int4" + }, + { + "ordinal": 25, + "name": "config_hash", + "type_info": "Varchar" + }, + { + "ordinal": 26, + "name": "config_files", + "type_info": "Jsonb" + }, + { + "ordinal": 27, + "name": "template_source", + "type_info": "Varchar" + }, + { + "ordinal": 28, + "name": "parent_app_code", + "type_info": "Varchar" + }, + { + "ordinal": 29, + "name": "deployment_id", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int4" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + false, + true, + true, + true, + true, + true, + true, + true + ] + }, + "hash": "1ee7eb9b87cfcc6ba3d2bbc6351277ac4a7f94d9f0f448b5549e30fc6cc66e19" +} diff --git a/.sqlx/query-722e059fca26aa3be81451ef5e266cc32d0e3ebc0611bd69013b6c3aa240b674.json b/.sqlx/query-27d5c5d688f0ee38fb6db48ef062b31a3f661b0d7351d648f24f277467d5ca2d.json similarity index 81% rename from .sqlx/query-722e059fca26aa3be81451ef5e266cc32d0e3ebc0611bd69013b6c3aa240b674.json rename to .sqlx/query-27d5c5d688f0ee38fb6db48ef062b31a3f661b0d7351d648f24f277467d5ca2d.json index 65bb611f..9595775f 100644 --- a/.sqlx/query-722e059fca26aa3be81451ef5e266cc32d0e3ebc0611bd69013b6c3aa240b674.json +++ b/.sqlx/query-27d5c5d688f0ee38fb6db48ef062b31a3f661b0d7351d648f24f277467d5ca2d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - 
"query": "SELECT \n t.id,\n t.creator_user_id,\n t.creator_name,\n t.name,\n t.slug,\n t.short_description,\n t.long_description,\n c.name AS \"category_code?\",\n t.product_id,\n t.tags,\n t.tech_stack,\n t.status,\n t.is_configurable,\n t.view_count,\n t.deploy_count,\n t.created_at,\n t.updated_at,\n t.approved_at,\n t.required_plan_name\n FROM stack_template t\n LEFT JOIN stack_category c ON t.category_id = c.id\n WHERE t.id = $1", + "query": "SELECT \n t.id,\n t.creator_user_id,\n t.creator_name,\n t.name,\n t.slug,\n t.short_description,\n t.long_description,\n c.name AS \"category_code?\",\n t.product_id,\n t.tags,\n t.tech_stack,\n t.status,\n t.is_configurable,\n t.view_count,\n t.deploy_count,\n t.created_at,\n t.updated_at,\n t.approved_at,\n t.required_plan_name,\n t.price,\n t.billing_cycle,\n t.currency\n FROM stack_template t\n LEFT JOIN stack_category c ON t.category_id = c.id\n WHERE t.id = $1", "describe": { "columns": [ { @@ -97,6 +97,21 @@ "ordinal": 18, "name": "required_plan_name", "type_info": "Varchar" + }, + { + "ordinal": 19, + "name": "price", + "type_info": "Float8" + }, + { + "ordinal": 20, + "name": "billing_cycle", + "type_info": "Varchar" + }, + { + "ordinal": 21, + "name": "currency", + "type_info": "Varchar" } ], "parameters": { @@ -123,8 +138,11 @@ true, true, true, + true, + true, + true, true ] }, - "hash": "722e059fca26aa3be81451ef5e266cc32d0e3ebc0611bd69013b6c3aa240b674" + "hash": "27d5c5d688f0ee38fb6db48ef062b31a3f661b0d7351d648f24f277467d5ca2d" } diff --git a/.sqlx/query-39d0d9c946cbd9cdacff0d59a39a6c331091879a27faeabbebf1602c797b22ea.json b/.sqlx/query-39d0d9c946cbd9cdacff0d59a39a6c331091879a27faeabbebf1602c797b22ea.json deleted file mode 100644 index af16b9c0..00000000 --- a/.sqlx/query-39d0d9c946cbd9cdacff0d59a39a6c331091879a27faeabbebf1602c797b22ea.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO server (\n user_id,\n project_id,\n region,\n zone,\n server,\n os,\n disk_type,\n created_at,\n updated_at,\n srv_ip,\n ssh_user,\n ssh_port,\n vault_key_path,\n connection_mode,\n key_status,\n name\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, NOW() at time zone 'utc',NOW() at time zone 'utc', $8, $9, $10, $11, $12, $13, $14)\n RETURNING id;\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Varchar", - "Int4", - "Varchar", - "Varchar", - "Varchar", - "Varchar", - "Varchar", - "Varchar", - "Varchar", - "Int4", - "Varchar", - "Varchar", - "Varchar", - "Varchar" - ] - }, - "nullable": [ - false - ] - }, - "hash": "39d0d9c946cbd9cdacff0d59a39a6c331091879a27faeabbebf1602c797b22ea" -} diff --git a/.sqlx/query-4ed4ce17b28e36898d9afabb96b7043ceee664f67752c41bf06df6e51ed69362.json b/.sqlx/query-463efe189d11f943d76f806de8471446f52bd00706421b02b4dacc0140c574c1.json similarity index 67% rename from .sqlx/query-4ed4ce17b28e36898d9afabb96b7043ceee664f67752c41bf06df6e51ed69362.json rename to .sqlx/query-463efe189d11f943d76f806de8471446f52bd00706421b02b4dacc0140c574c1.json index c3f8828e..f3cf179e 100644 --- a/.sqlx/query-4ed4ce17b28e36898d9afabb96b7043ceee664f67752c41bf06df6e51ed69362.json +++ b/.sqlx/query-463efe189d11f943d76f806de8471446f52bd00706421b02b4dacc0140c574c1.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "INSERT INTO stack_template (\n creator_user_id, creator_name, name, slug,\n short_description, long_description, category_id,\n tags, tech_stack, status\n ) VALUES ($1,$2,$3,$4,$5,$6,(SELECT id FROM stack_category 
WHERE name = $7),$8,$9,'draft')\n RETURNING \n id,\n creator_user_id,\n creator_name,\n name,\n slug,\n short_description,\n long_description,\n (SELECT name FROM stack_category WHERE id = category_id) AS \"category_code?\",\n product_id,\n tags,\n tech_stack,\n status,\n is_configurable,\n view_count,\n deploy_count,\n required_plan_name,\n created_at,\n updated_at,\n approved_at\n ", + "query": "INSERT INTO stack_template (\n creator_user_id, creator_name, name, slug,\n short_description, long_description, category_id,\n tags, tech_stack, status, price, billing_cycle, currency\n ) VALUES ($1,$2,$3,$4,$5,$6,(SELECT id FROM stack_category WHERE name = $7),$8,$9,'draft',$10,$11,$12)\n RETURNING \n id,\n creator_user_id,\n creator_name,\n name,\n slug,\n short_description,\n long_description,\n (SELECT name FROM stack_category WHERE id = category_id) AS \"category_code?\",\n product_id,\n tags,\n tech_stack,\n status,\n is_configurable,\n view_count,\n deploy_count,\n required_plan_name,\n price,\n billing_cycle,\n currency,\n created_at,\n updated_at,\n approved_at\n ", "describe": { "columns": [ { @@ -85,16 +85,31 @@ }, { "ordinal": 16, + "name": "price", + "type_info": "Float8" + }, + { + "ordinal": 17, + "name": "billing_cycle", + "type_info": "Varchar" + }, + { + "ordinal": 18, + "name": "currency", + "type_info": "Varchar" + }, + { + "ordinal": 19, "name": "created_at", "type_info": "Timestamptz" }, { - "ordinal": 17, + "ordinal": 20, "name": "updated_at", "type_info": "Timestamptz" }, { - "ordinal": 18, + "ordinal": 21, "name": "approved_at", "type_info": "Timestamptz" } @@ -109,7 +124,10 @@ "Text", "Text", "Jsonb", - "Jsonb" + "Jsonb", + "Float8", + "Varchar", + "Varchar" ] }, "nullable": [ @@ -131,8 +149,11 @@ true, true, true, + true, + true, + true, true ] }, - "hash": "4ed4ce17b28e36898d9afabb96b7043ceee664f67752c41bf06df6e51ed69362" + "hash": "463efe189d11f943d76f806de8471446f52bd00706421b02b4dacc0140c574c1" } diff --git a/.sqlx/query-467365894a7f9a0888584e8879cac289299f4d03539b9c746324cd183e265553.json b/.sqlx/query-467365894a7f9a0888584e8879cac289299f4d03539b9c746324cd183e265553.json index f2a83075..b36598b5 100644 --- a/.sqlx/query-467365894a7f9a0888584e8879cac289299f4d03539b9c746324cd183e265553.json +++ b/.sqlx/query-467365894a7f9a0888584e8879cac289299f4d03539b9c746324cd183e265553.json @@ -147,6 +147,11 @@ "ordinal": 28, "name": "parent_app_code", "type_info": "Varchar" + }, + { + "ordinal": 29, + "name": "deployment_id", + "type_info": "Int4" } ], "parameters": { @@ -183,6 +188,7 @@ true, true, true, + true, true ] }, diff --git a/.sqlx/query-4bdcd8d475ffd8aab728ec2b9d0d8c578770e2d52bf531de6e69561a4adbb21c.json b/.sqlx/query-4bdcd8d475ffd8aab728ec2b9d0d8c578770e2d52bf531de6e69561a4adbb21c.json index ece09b87..8dbb2340 100644 --- a/.sqlx/query-4bdcd8d475ffd8aab728ec2b9d0d8c578770e2d52bf531de6e69561a4adbb21c.json +++ b/.sqlx/query-4bdcd8d475ffd8aab728ec2b9d0d8c578770e2d52bf531de6e69561a4adbb21c.json @@ -87,6 +87,11 @@ "ordinal": 16, "name": "name", "type_info": "Varchar" + }, + { + "ordinal": 17, + "name": "cloud_id", + "type_info": "Int4" } ], "parameters": { @@ -111,6 +116,7 @@ true, false, false, + true, true ] }, diff --git a/.sqlx/query-51517c5eb7f50e463ba2968f4d94e2285b551e817f881b7193fc88189b4001e0.json b/.sqlx/query-51517c5eb7f50e463ba2968f4d94e2285b551e817f881b7193fc88189b4001e0.json new file mode 100644 index 00000000..b05bc5ed --- /dev/null +++ b/.sqlx/query-51517c5eb7f50e463ba2968f4d94e2285b551e817f881b7193fc88189b4001e0.json @@ -0,0 +1,14 @@ +{ + "db_name": 
"PostgreSQL", + "query": "UPDATE stack_template SET status = 'submitted', approved_at = NULL WHERE id = $1::uuid AND status = 'approved'", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "51517c5eb7f50e463ba2968f4d94e2285b551e817f881b7193fc88189b4001e0" +} diff --git a/.sqlx/query-53a76c5d7dbb79cb51cace5ffacc2cf689a650fb90bccfb80689ef3c5b73a2b0.json b/.sqlx/query-53a76c5d7dbb79cb51cace5ffacc2cf689a650fb90bccfb80689ef3c5b73a2b0.json index 78e33c05..a4ecd8b4 100644 --- a/.sqlx/query-53a76c5d7dbb79cb51cace5ffacc2cf689a650fb90bccfb80689ef3c5b73a2b0.json +++ b/.sqlx/query-53a76c5d7dbb79cb51cace5ffacc2cf689a650fb90bccfb80689ef3c5b73a2b0.json @@ -147,6 +147,11 @@ "ordinal": 28, "name": "parent_app_code", "type_info": "Varchar" + }, + { + "ordinal": 29, + "name": "deployment_id", + "type_info": "Int4" } ], "parameters": { @@ -183,6 +188,7 @@ true, true, true, + true, true ] }, diff --git a/.sqlx/query-4fbb395f2080f29291ea091d2c4135b962e41b4e5b49d20e9d5fee3da051aeba.json b/.sqlx/query-58451f6a71d026c5d868c22d58513e193b2b157f0c679c54791276fed9d638aa.json similarity index 78% rename from .sqlx/query-4fbb395f2080f29291ea091d2c4135b962e41b4e5b49d20e9d5fee3da051aeba.json rename to .sqlx/query-58451f6a71d026c5d868c22d58513e193b2b157f0c679c54791276fed9d638aa.json index 49c82f09..6064e69a 100644 --- a/.sqlx/query-4fbb395f2080f29291ea091d2c4135b962e41b4e5b49d20e9d5fee3da051aeba.json +++ b/.sqlx/query-58451f6a71d026c5d868c22d58513e193b2b157f0c679c54791276fed9d638aa.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "SELECT \n t.id,\n t.creator_user_id,\n t.creator_name,\n t.name,\n t.slug,\n t.short_description,\n t.long_description,\n c.name AS \"category_code?\",\n t.product_id,\n t.tags,\n t.tech_stack,\n t.status,\n t.is_configurable,\n t.view_count,\n t.deploy_count,\n t.required_plan_name,\n t.created_at,\n t.updated_at,\n t.approved_at\n FROM stack_template t\n LEFT JOIN stack_category c ON t.category_id = c.id\n WHERE t.creator_user_id = $1\n ORDER BY t.created_at DESC", + "query": "SELECT \n t.id,\n t.creator_user_id,\n t.creator_name,\n t.name,\n t.slug,\n t.short_description,\n t.long_description,\n c.name AS \"category_code?\",\n t.product_id,\n t.tags,\n t.tech_stack,\n t.status,\n t.is_configurable,\n t.view_count,\n t.deploy_count,\n t.required_plan_name,\n t.price,\n t.billing_cycle,\n t.currency,\n t.created_at,\n t.updated_at,\n t.approved_at\n FROM stack_template t\n LEFT JOIN stack_category c ON t.category_id = c.id\n WHERE t.slug = $1 AND t.status = 'approved'", "describe": { "columns": [ { @@ -85,16 +85,31 @@ }, { "ordinal": 16, + "name": "price", + "type_info": "Float8" + }, + { + "ordinal": 17, + "name": "billing_cycle", + "type_info": "Varchar" + }, + { + "ordinal": 18, + "name": "currency", + "type_info": "Varchar" + }, + { + "ordinal": 19, "name": "created_at", "type_info": "Timestamptz" }, { - "ordinal": 17, + "ordinal": 20, "name": "updated_at", "type_info": "Timestamptz" }, { - "ordinal": 18, + "ordinal": 21, "name": "approved_at", "type_info": "Timestamptz" } @@ -123,8 +138,11 @@ true, true, true, + true, + true, + true, true ] }, - "hash": "4fbb395f2080f29291ea091d2c4135b962e41b4e5b49d20e9d5fee3da051aeba" + "hash": "58451f6a71d026c5d868c22d58513e193b2b157f0c679c54791276fed9d638aa" } diff --git a/.sqlx/query-5d36c126c67a5b70ac168bc46fcff3ee63ae5548ce78f244099f9d61ca694312.json b/.sqlx/query-5d36c126c67a5b70ac168bc46fcff3ee63ae5548ce78f244099f9d61ca694312.json index 93848280..73f0154e 100644 --- 
a/.sqlx/query-5d36c126c67a5b70ac168bc46fcff3ee63ae5548ce78f244099f9d61ca694312.json +++ b/.sqlx/query-5d36c126c67a5b70ac168bc46fcff3ee63ae5548ce78f244099f9d61ca694312.json @@ -147,6 +147,11 @@ "ordinal": 28, "name": "parent_app_code", "type_info": "Varchar" + }, + { + "ordinal": 29, + "name": "deployment_id", + "type_info": "Int4" } ], "parameters": { @@ -184,6 +189,7 @@ true, true, true, + true, true ] }, diff --git a/.sqlx/query-61311962504783c92869a5917be44cd9deb3ad2cad651ef15cb4d81c38c03cb5.json b/.sqlx/query-61311962504783c92869a5917be44cd9deb3ad2cad651ef15cb4d81c38c03cb5.json new file mode 100644 index 00000000..3e12a43d --- /dev/null +++ b/.sqlx/query-61311962504783c92869a5917be44cd9deb3ad2cad651ef15cb4d81c38c03cb5.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO stack_template_review (template_id, reviewer_user_id, decision, review_reason, reviewed_at) VALUES ($1::uuid, $2, 'unapproved', $3, now())", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid", + "Varchar", + "Text" + ] + }, + "nullable": [] + }, + "hash": "61311962504783c92869a5917be44cd9deb3ad2cad651ef15cb4d81c38c03cb5" +} diff --git a/.sqlx/query-7563c1c8327e4f89f658bdf48ae243bc6e8d150bbce86b7c147a9fca07c6d08c.json b/.sqlx/query-7563c1c8327e4f89f658bdf48ae243bc6e8d150bbce86b7c147a9fca07c6d08c.json new file mode 100644 index 00000000..fa5021e8 --- /dev/null +++ b/.sqlx/query-7563c1c8327e4f89f658bdf48ae243bc6e8d150bbce86b7c147a9fca07c6d08c.json @@ -0,0 +1,36 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO server (\n user_id,\n project_id,\n cloud_id,\n region,\n zone,\n server,\n os,\n disk_type,\n created_at,\n updated_at,\n srv_ip,\n ssh_user,\n ssh_port,\n vault_key_path,\n connection_mode,\n key_status,\n name\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, NOW() at time zone 'utc',NOW() at time zone 'utc', $9, $10, $11, $12, $13, $14, $15)\n RETURNING id;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Int4", + "Int4", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Int4", + "Varchar", + "Varchar", + "Varchar", + "Varchar" + ] + }, + "nullable": [ + false + ] + }, + "hash": "7563c1c8327e4f89f658bdf48ae243bc6e8d150bbce86b7c147a9fca07c6d08c" +} diff --git a/.sqlx/query-7a6b4eb7eefd541ecb0529783ac01c36b2e69902623f289bd3cc6bf73d2b0ce8.json b/.sqlx/query-7a6b4eb7eefd541ecb0529783ac01c36b2e69902623f289bd3cc6bf73d2b0ce8.json index 0fc08b84..13937cf3 100644 --- a/.sqlx/query-7a6b4eb7eefd541ecb0529783ac01c36b2e69902623f289bd3cc6bf73d2b0ce8.json +++ b/.sqlx/query-7a6b4eb7eefd541ecb0529783ac01c36b2e69902623f289bd3cc6bf73d2b0ce8.json @@ -87,6 +87,11 @@ "ordinal": 16, "name": "name", "type_info": "Varchar" + }, + { + "ordinal": 17, + "name": "cloud_id", + "type_info": "Int4" } ], "parameters": { @@ -113,6 +118,7 @@ true, false, false, + true, true ] }, diff --git a/.sqlx/query-7c087b528df89eb0bf41a4e46bcc48ab4946535a96baf0f49996d79387a3791c.json b/.sqlx/query-7c087b528df89eb0bf41a4e46bcc48ab4946535a96baf0f49996d79387a3791c.json index 7967fe5f..ed4a6406 100644 --- a/.sqlx/query-7c087b528df89eb0bf41a4e46bcc48ab4946535a96baf0f49996d79387a3791c.json +++ b/.sqlx/query-7c087b528df89eb0bf41a4e46bcc48ab4946535a96baf0f49996d79387a3791c.json @@ -87,6 +87,11 @@ "ordinal": 16, "name": "name", "type_info": "Varchar" + }, + { + "ordinal": 17, + "name": "cloud_id", + "type_info": "Int4" } ], "parameters": { @@ -111,6 +116,7 @@ true, false, 
false, + true, true ] }, diff --git a/.sqlx/query-83769682ee9bf8c76f1058a71b11d7d009683eef134e7be5b4ac285333822f58.json b/.sqlx/query-7e5e7d4fa4e56ca213dee602bf13ccbe9a3424d81d6db3534ba4a59967b63105.json similarity index 77% rename from .sqlx/query-83769682ee9bf8c76f1058a71b11d7d009683eef134e7be5b4ac285333822f58.json rename to .sqlx/query-7e5e7d4fa4e56ca213dee602bf13ccbe9a3424d81d6db3534ba4a59967b63105.json index eb70c112..5c5653e1 100644 --- a/.sqlx/query-83769682ee9bf8c76f1058a71b11d7d009683eef134e7be5b4ac285333822f58.json +++ b/.sqlx/query-7e5e7d4fa4e56ca213dee602bf13ccbe9a3424d81d6db3534ba4a59967b63105.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE server\n SET\n user_id=$2,\n project_id=$3,\n region=$4,\n zone=$5,\n server=$6,\n os=$7,\n disk_type=$8,\n updated_at=NOW() at time zone 'utc',\n srv_ip=$9,\n ssh_user=$10,\n ssh_port=$11,\n vault_key_path=$12,\n connection_mode=$13,\n key_status=$14,\n name=$15\n WHERE id = $1\n RETURNING *\n ", + "query": "\n UPDATE server\n SET\n user_id=$2,\n project_id=$3,\n cloud_id=$4,\n region=$5,\n zone=$6,\n server=$7,\n os=$8,\n disk_type=$9,\n updated_at=NOW() at time zone 'utc',\n srv_ip=$10,\n ssh_user=$11,\n ssh_port=$12,\n vault_key_path=$13,\n connection_mode=$14,\n key_status=$15,\n name=$16\n WHERE id = $1\n RETURNING *\n ", "describe": { "columns": [ { @@ -87,6 +87,11 @@ "ordinal": 16, "name": "name", "type_info": "Varchar" + }, + { + "ordinal": 17, + "name": "cloud_id", + "type_info": "Int4" } ], "parameters": { @@ -94,6 +99,7 @@ "Int4", "Varchar", "Int4", + "Int4", "Varchar", "Varchar", "Varchar", @@ -125,8 +131,9 @@ true, false, false, + true, true ] }, - "hash": "83769682ee9bf8c76f1058a71b11d7d009683eef134e7be5b4ac285333822f58" + "hash": "7e5e7d4fa4e56ca213dee602bf13ccbe9a3424d81d6db3534ba4a59967b63105" } diff --git a/.sqlx/query-8cfb2d3a45ff6c5d1d51a98f6a37ba89da5a49c211c8627c314b8a32c92a62e1.json b/.sqlx/query-8cfb2d3a45ff6c5d1d51a98f6a37ba89da5a49c211c8627c314b8a32c92a62e1.json index 24aef18f..06c565c9 100644 --- a/.sqlx/query-8cfb2d3a45ff6c5d1d51a98f6a37ba89da5a49c211c8627c314b8a32c92a62e1.json +++ b/.sqlx/query-8cfb2d3a45ff6c5d1d51a98f6a37ba89da5a49c211c8627c314b8a32c92a62e1.json @@ -87,6 +87,11 @@ "ordinal": 16, "name": "name", "type_info": "Varchar" + }, + { + "ordinal": 17, + "name": "cloud_id", + "type_info": "Int4" } ], "parameters": { @@ -111,6 +116,7 @@ true, false, false, + true, true ] }, diff --git a/.sqlx/query-970e2fc198c379a19849c4621adeca951c761f6b9abd6c70158000e0c03ca7c7.json b/.sqlx/query-91c6d630cb34f4d85a8d9ecdf7a1438ccb73ce433d52a4243d9ebc0b98124310.json similarity index 78% rename from .sqlx/query-970e2fc198c379a19849c4621adeca951c761f6b9abd6c70158000e0c03ca7c7.json rename to .sqlx/query-91c6d630cb34f4d85a8d9ecdf7a1438ccb73ce433d52a4243d9ebc0b98124310.json index 0b5b79fc..1a20b94d 100644 --- a/.sqlx/query-970e2fc198c379a19849c4621adeca951c761f6b9abd6c70158000e0c03ca7c7.json +++ b/.sqlx/query-91c6d630cb34f4d85a8d9ecdf7a1438ccb73ce433d52a4243d9ebc0b98124310.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "SELECT \n t.id,\n t.creator_user_id,\n t.creator_name,\n t.name,\n t.slug,\n t.short_description,\n t.long_description,\n c.name AS \"category_code?\",\n t.product_id,\n t.tags,\n t.tech_stack,\n t.status,\n t.is_configurable,\n t.view_count,\n t.deploy_count,\n t.required_plan_name,\n t.created_at,\n t.updated_at,\n t.approved_at\n FROM stack_template t\n LEFT JOIN stack_category c ON t.category_id = c.id\n WHERE t.slug = $1 AND t.status = 'approved'", + "query": "SELECT \n 
t.id,\n t.creator_user_id,\n t.creator_name,\n t.name,\n t.slug,\n t.short_description,\n t.long_description,\n c.name AS \"category_code?\",\n t.product_id,\n t.tags,\n t.tech_stack,\n t.status,\n t.is_configurable,\n t.view_count,\n t.deploy_count,\n t.required_plan_name,\n t.price,\n t.billing_cycle,\n t.currency,\n t.created_at,\n t.updated_at,\n t.approved_at\n FROM stack_template t\n LEFT JOIN stack_category c ON t.category_id = c.id\n WHERE t.creator_user_id = $1\n ORDER BY t.created_at DESC", "describe": { "columns": [ { @@ -85,16 +85,31 @@ }, { "ordinal": 16, + "name": "price", + "type_info": "Float8" + }, + { + "ordinal": 17, + "name": "billing_cycle", + "type_info": "Varchar" + }, + { + "ordinal": 18, + "name": "currency", + "type_info": "Varchar" + }, + { + "ordinal": 19, "name": "created_at", "type_info": "Timestamptz" }, { - "ordinal": 17, + "ordinal": 20, "name": "updated_at", "type_info": "Timestamptz" }, { - "ordinal": 18, + "ordinal": 21, "name": "approved_at", "type_info": "Timestamptz" } @@ -123,8 +138,11 @@ true, true, true, + true, + true, + true, true ] }, - "hash": "970e2fc198c379a19849c4621adeca951c761f6b9abd6c70158000e0c03ca7c7" + "hash": "91c6d630cb34f4d85a8d9ecdf7a1438ccb73ce433d52a4243d9ebc0b98124310" } diff --git a/.sqlx/query-a24f6ae41366cfc2480a7d7832b1f823cc91662394ec8025b7ef486b85374411.json b/.sqlx/query-a24f6ae41366cfc2480a7d7832b1f823cc91662394ec8025b7ef486b85374411.json index d481a709..cb408194 100644 --- a/.sqlx/query-a24f6ae41366cfc2480a7d7832b1f823cc91662394ec8025b7ef486b85374411.json +++ b/.sqlx/query-a24f6ae41366cfc2480a7d7832b1f823cc91662394ec8025b7ef486b85374411.json @@ -87,6 +87,11 @@ "ordinal": 16, "name": "name", "type_info": "Varchar" + }, + { + "ordinal": 17, + "name": "cloud_id", + "type_info": "Int4" } ], "parameters": { @@ -112,6 +117,7 @@ true, false, false, + true, true ] }, diff --git a/.sqlx/query-a6cac393c36ca2ae08d877f5ed2ed699a7a29f68625f6ceab653980e8ff8d2ac.json b/.sqlx/query-a6cac393c36ca2ae08d877f5ed2ed699a7a29f68625f6ceab653980e8ff8d2ac.json new file mode 100644 index 00000000..67665554 --- /dev/null +++ b/.sqlx/query-a6cac393c36ca2ae08d877f5ed2ed699a7a29f68625f6ceab653980e8ff8d2ac.json @@ -0,0 +1,130 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n s.id,\n s.user_id,\n s.project_id,\n s.cloud_id,\n c.provider as cloud,\n s.region,\n s.zone,\n s.server,\n s.os,\n s.disk_type,\n s.created_at,\n s.updated_at,\n s.srv_ip,\n s.ssh_port,\n s.ssh_user,\n s.vault_key_path,\n s.connection_mode,\n s.key_status,\n s.name\n FROM server s\n LEFT JOIN cloud c ON s.cloud_id = c.id\n WHERE s.user_id=$1\n ORDER BY s.created_at DESC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "user_id", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "project_id", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "cloud_id", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "cloud", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "region", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "zone", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "server", + "type_info": "Varchar" + }, + { + "ordinal": 8, + "name": "os", + "type_info": "Varchar" + }, + { + "ordinal": 9, + "name": "disk_type", + "type_info": "Varchar" + }, + { + "ordinal": 10, + "name": "created_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 11, + "name": "updated_at", + "type_info": "Timestamptz" + }, + { + "ordinal": 12, + "name": 
"srv_ip", + "type_info": "Varchar" + }, + { + "ordinal": 13, + "name": "ssh_port", + "type_info": "Int4" + }, + { + "ordinal": 14, + "name": "ssh_user", + "type_info": "Varchar" + }, + { + "ordinal": 15, + "name": "vault_key_path", + "type_info": "Varchar" + }, + { + "ordinal": 16, + "name": "connection_mode", + "type_info": "Varchar" + }, + { + "ordinal": 17, + "name": "key_status", + "type_info": "Varchar" + }, + { + "ordinal": 18, + "name": "name", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + false, + true, + false, + true, + true, + true, + true, + true, + false, + false, + true, + true, + true, + true, + false, + false, + true + ] + }, + "hash": "a6cac393c36ca2ae08d877f5ed2ed699a7a29f68625f6ceab653980e8ff8d2ac" +} diff --git a/.sqlx/query-d4fdef5755536c2b9e0b56448c9f7b9143ee3a6fc9b363f93d0c816d44ebbbb0.json b/.sqlx/query-d4fdef5755536c2b9e0b56448c9f7b9143ee3a6fc9b363f93d0c816d44ebbbb0.json new file mode 100644 index 00000000..c966c3b7 --- /dev/null +++ b/.sqlx/query-d4fdef5755536c2b9e0b56448c9f7b9143ee3a6fc9b363f93d0c816d44ebbbb0.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE stack_template SET status = 'submitted', updated_at = now()\n WHERE id = $1::uuid AND status IN ('rejected', 'needs_changes', 'approved')", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "d4fdef5755536c2b9e0b56448c9f7b9143ee3a6fc9b363f93d0c816d44ebbbb0" +} diff --git a/.sqlx/query-0faf1a2932ba1b37fc9f982bc86c323869489c6dc7e17479b647f0aa799df910.json b/.sqlx/query-e1258273806ab030586a80cb7ac83a5339d0a631fc702082f95642ebb0c1d3a7.json similarity index 60% rename from .sqlx/query-0faf1a2932ba1b37fc9f982bc86c323869489c6dc7e17479b647f0aa799df910.json rename to .sqlx/query-e1258273806ab030586a80cb7ac83a5339d0a631fc702082f95642ebb0c1d3a7.json index 5b7cb8ea..64a3f11f 100644 --- a/.sqlx/query-0faf1a2932ba1b37fc9f982bc86c323869489c6dc7e17479b647f0aa799df910.json +++ b/.sqlx/query-e1258273806ab030586a80cb7ac83a5339d0a631fc702082f95642ebb0c1d3a7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "UPDATE stack_template SET status = 'submitted' WHERE id = $1::uuid AND status IN ('draft','rejected')", + "query": "UPDATE stack_template SET status = 'submitted' WHERE id = $1::uuid AND status IN ('draft','rejected','needs_changes')", "describe": { "columns": [], "parameters": { @@ -10,5 +10,5 @@ }, "nullable": [] }, - "hash": "0faf1a2932ba1b37fc9f982bc86c323869489c6dc7e17479b647f0aa799df910" + "hash": "e1258273806ab030586a80cb7ac83a5339d0a631fc702082f95642ebb0c1d3a7" } diff --git a/.sqlx/query-1fc71c48b12866a80749de677b0c3b478efa6ee82397af82d21bc88110bf8ad1.json b/.sqlx/query-fdb45a4fb83d33464cddc021f3cdfebd5dd137795ab393492b02ab517546a708.json similarity index 90% rename from .sqlx/query-1fc71c48b12866a80749de677b0c3b478efa6ee82397af82d21bc88110bf8ad1.json rename to .sqlx/query-fdb45a4fb83d33464cddc021f3cdfebd5dd137795ab393492b02ab517546a708.json index 2c330971..f9b29b17 100644 --- a/.sqlx/query-1fc71c48b12866a80749de677b0c3b478efa6ee82397af82d21bc88110bf8ad1.json +++ b/.sqlx/query-fdb45a4fb83d33464cddc021f3cdfebd5dd137795ab393492b02ab517546a708.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE project_app SET\n code = $2,\n name = $3,\n image = $4,\n environment = $5,\n ports = $6,\n volumes = $7,\n domain = $8,\n ssl_enabled = $9,\n resources = $10,\n restart_policy = $11,\n command = $12,\n entrypoint = $13,\n networks = $14,\n depends_on 
= $15,\n healthcheck = $16,\n labels = $17,\n config_files = $18,\n template_source = $19,\n enabled = $20,\n deploy_order = $21,\n parent_app_code = $22,\n config_version = COALESCE(config_version, 0) + 1,\n updated_at = NOW()\n WHERE id = $1\n RETURNING *\n ", + "query": "\n UPDATE project_app SET\n code = $2,\n name = $3,\n image = $4,\n environment = $5,\n ports = $6,\n volumes = $7,\n domain = $8,\n ssl_enabled = $9,\n resources = $10,\n restart_policy = $11,\n command = $12,\n entrypoint = $13,\n networks = $14,\n depends_on = $15,\n healthcheck = $16,\n labels = $17,\n config_files = $18,\n template_source = $19,\n enabled = $20,\n deploy_order = $21,\n parent_app_code = $22,\n deployment_id = $23,\n config_version = COALESCE(config_version, 0) + 1,\n updated_at = NOW()\n WHERE id = $1\n RETURNING *\n ", "describe": { "columns": [ { @@ -147,6 +147,11 @@ "ordinal": 28, "name": "parent_app_code", "type_info": "Varchar" + }, + { + "ordinal": 29, + "name": "deployment_id", + "type_info": "Int4" } ], "parameters": { @@ -172,7 +177,8 @@ "Varchar", "Bool", "Int4", - "Varchar" + "Varchar", + "Int4" ] }, "nullable": [ @@ -204,8 +210,9 @@ true, true, true, + true, true ] }, - "hash": "1fc71c48b12866a80749de677b0c3b478efa6ee82397af82d21bc88110bf8ad1" + "hash": "fdb45a4fb83d33464cddc021f3cdfebd5dd137795ab393492b02ab517546a708" } diff --git a/Cargo.lock b/Cargo.lock index f53857dd..4acfbd21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -742,6 +742,17 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a" +[[package]] +name = "bcrypt-pbkdf" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6aeac2e1fe888769f34f05ac343bbef98b14d1ffb292ab69d4608b3abc86f2a2" +dependencies = [ + "blowfish", + "pbkdf2 0.12.2", + "sha2", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -788,6 +799,16 @@ dependencies = [ "piper", ] +[[package]] +name = "blowfish" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e412e2cd0f2b2d93e02543ceae7917b3c70331573df19ee046bcbc35e45e87d7" +dependencies = [ + "byteorder", + "cipher", +] + [[package]] name = "brotli" version = "3.5.0" @@ -963,6 +984,17 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" +[[package]] +name = "chacha20" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + [[package]] name = "chrono" version = "0.4.42" @@ -1310,6 +1342,7 @@ dependencies = [ "fiat-crypto", "rustc_version", "subtle", + "zeroize", ] [[package]] @@ -1594,6 +1627,27 @@ dependencies = [ "subtle", ] +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -1655,6 +1709,7 @@ version = "2.2.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ + "pkcs8", "signature", ] @@ -1666,8 +1721,11 @@ checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" dependencies = [ "curve25519-dalek", "ed25519", + "rand_core 0.6.4", + "serde", "sha2", "subtle", + "zeroize", ] [[package]] @@ -1691,6 +1749,8 @@ dependencies = [ "ff", "generic-array", "group", + "hkdf", + "pem-rfc7468", "pkcs8", "rand_core 0.6.4", "sec1", @@ -2232,6 +2292,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + [[package]] name = "hkdf" version = "0.12.4" @@ -2795,6 +2861,12 @@ dependencies = [ "digest", ] +[[package]] +name = "md5" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" + [[package]] name = "memchr" version = "2.7.6" @@ -2919,6 +2991,7 @@ checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", + "rand 0.8.5", ] [[package]] @@ -3063,6 +3136,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "ordered-multimap" version = "0.4.3" @@ -3171,6 +3250,17 @@ dependencies = [ "regex", ] +[[package]] +name = "password-hash" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "paste" version = "1.0.15" @@ -3183,6 +3273,18 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" +[[package]] +name = "pbkdf2" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" +dependencies = [ + "digest", + "hmac", + "password-hash", + "sha2", +] + [[package]] name = "pbkdf2" version = "0.12.2" @@ -3389,7 +3491,7 @@ dependencies = [ "aes", "cbc", "der", - "pbkdf2", + "pbkdf2 0.12.2", "scrypt", "sha2", "spki", @@ -3402,6 +3504,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ "der", + "pkcs5", + "rand_core 0.6.4", "spki", ] @@ -3441,6 +3545,17 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "poly1305" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +dependencies = [ + "cpufeatures", + "opaque-debug", + "universal-hash", +] + [[package]] name = "polyval" version = "0.6.2" @@ -3715,6 +3830,17 @@ dependencies = [ "bitflags 2.10.0", ] +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror 1.0.69", +] + [[package]] name = "regex" version = "1.12.2" @@ -3882,6 +4008,109 @@ dependencies = [ "zeroize", ] +[[package]] +name = "russh" +version = "0.44.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6500eedfaf8cd81597899d896908a4b9cd5cb566db875e843c04ccf92add2c16" +dependencies = [ + "aes", + "aes-gcm", + "async-trait", + "bitflags 2.10.0", + "byteorder", + "cbc", + "chacha20", + "ctr", + "curve25519-dalek", + "digest", + "elliptic-curve", + "flate2", + "futures", + "generic-array", + "hex-literal", + "hmac", + "log", + "num-bigint", + "once_cell", + "p256", + "p384", + "p521", + "poly1305", + "rand 0.8.5", + "rand_core 0.6.4", + "russh-cryptovec", + "russh-keys", + "sha1", + "sha2", + "ssh-encoding", + "ssh-key", + "subtle", + "thiserror 1.0.69", + "tokio", +] + +[[package]] +name = "russh-cryptovec" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fadd2c0ab350e21c66556f94ee06f766d8bdae3213857ba7610bfd8e10e51880" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "russh-keys" +version = "0.44.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb8c0bfe024d4edd242f65a2ac6c8bf38a892930050b9eb90909d8fc2c413c8d" +dependencies = [ + "aes", + "async-trait", + "bcrypt-pbkdf", + "block-padding", + "byteorder", + "cbc", + "ctr", + "data-encoding", + "der", + "digest", + "dirs", + "ecdsa", + "ed25519-dalek", + "elliptic-curve", + "futures", + "hmac", + "inout", + "log", + "md5", + "num-integer", + "p256", + "p384", + "p521", + "pbkdf2 0.11.0", + "pkcs1", + "pkcs5", + "pkcs8", + "rand 0.8.5", + "rand_core 0.6.4", + "rsa", + "russh-cryptovec", + "sec1", + "serde", + "sha1", + "sha2", + "spki", + "ssh-encoding", + "ssh-key", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "typenum", + "zeroize", +] + [[package]] name = "rust-ini" version = "0.18.0" @@ -4066,7 +4295,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0516a385866c09368f0b5bcd1caff3366aace790fcd46e2bb032697bb172fd1f" dependencies = [ - "pbkdf2", + "pbkdf2 0.12.2", "salsa20", "sha2", ] @@ -4691,8 +4920,15 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "caac132742f0d33c3af65bfcde7f6aa8f62f0e991d80db99149eb9d44708784f" dependencies = [ + "aes", + "aes-gcm", + "cbc", + "chacha20", "cipher", + "ctr", + "poly1305", "ssh-encoding", + "subtle", ] [[package]] @@ -4712,7 +4948,9 @@ version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b86f5297f0f04d08cabaa0f6bff7cb6aec4d9c3b49d87990d63da9d9156a8c3" dependencies = [ + "bcrypt-pbkdf", "ed25519-dalek", + "num-bigint-dig", "p256", "p384", "p521", @@ -4767,6 +5005,8 @@ dependencies = [ "redis", "regex", "reqwest", + "russh", + "russh-keys", "serde", "serde_derive", "serde_json", @@ -6128,4 +6368,4 @@ checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" dependencies = [ "cc", "pkg-config", -] \ No newline at end of file +] diff --git a/Cargo.toml b/Cargo.toml index 724c077d..805044a3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,6 +43,8 @@ tracing-actix-web = "0.7.7" regex = "1.10.2" rand = "0.8.5" ssh-key = { version = "0.6", features = ["ed25519", "rand_core"] } +russh = "0.44" +russh-keys = "0.44" futures-util = "0.3.29" futures = "0.3.29" tokio-stream = "0.1.14" 
diff --git a/docker/dev/.env b/docker/dev/.env
index c7a23fdb..892f3064 100644
--- a/docker/dev/.env
+++ b/docker/dev/.env
@@ -14,6 +14,7 @@ VAULT_AGENT_PATH_PREFIX=agent
 ### 10.3 Environment Variables Required
 # User Service integration
 USER_SERVICE_URL=http://user:4100
+STACKER_SERVICE_TOKEN=changeme
 
 # Slack escalation
 SLACK_SUPPORT_WEBHOOK_URL=
diff --git a/docker/local/.env b/docker/local/.env
index 6371a972..3c22cfb0 100644
--- a/docker/local/.env
+++ b/docker/local/.env
@@ -1,5 +1,9 @@
-DATABASE_URL=postgres://postgres:postgres@stackerdb:5432/stacker
+DATABASE_URL=postgres://postgres:postgres@localhost:5432/stacker
 POSTGRES_USER=postgres
 POSTGRES_PASSWORD=postgres
 POSTGRES_DB=stacker
-POSTGRES_PORT=5432
\ No newline at end of file
+POSTGRES_PORT=5432
+
+# Webhook to User Service (marketplace approval flow)
+URL_SERVER_USER=http://user:4100
+STACKER_SERVICE_TOKEN=changeme
\ No newline at end of file
diff --git a/migrations/20260206120000_casbin_project_app_rules.down.sql b/migrations/20260206120000_casbin_project_app_rules.down.sql
new file mode 100644
index 00000000..5fd4b198
--- /dev/null
+++ b/migrations/20260206120000_casbin_project_app_rules.down.sql
@@ -0,0 +1,13 @@
+-- Remove Casbin rules for project app routes
+DELETE FROM public.casbin_rule
+WHERE ptype = 'p'
+  AND v0 = 'group_user'
+  AND v1 IN (
+    '/project/:id/apps',
+    '/project/:id/apps/:code',
+    '/project/:id/apps/:code/config',
+    '/project/:id/apps/:code/env',
+    '/project/:id/apps/:code/env/:name',
+    '/project/:id/apps/:code/ports',
+    '/project/:id/apps/:code/domain'
+  );
diff --git a/migrations/20260206120000_casbin_project_app_rules.up.sql b/migrations/20260206120000_casbin_project_app_rules.up.sql
new file mode 100644
index 00000000..f11545de
--- /dev/null
+++ b/migrations/20260206120000_casbin_project_app_rules.up.sql
@@ -0,0 +1,24 @@
+-- Add Casbin rules for project app CRUD and configuration endpoints
+-- These routes were added alongside the project_app table but never received Casbin policies
+
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES
+    -- List apps in a project
+    ('p', 'group_user', '/project/:id/apps', 'GET', '', '', ''),
+    -- Create app in a project
+    ('p', 'group_user', '/project/:id/apps', 'POST', '', '', ''),
+    -- Get a specific app by code
+    ('p', 'group_user', '/project/:id/apps/:code', 'GET', '', '', ''),
+    -- Get app configuration
+    ('p', 'group_user', '/project/:id/apps/:code/config', 'GET', '', '', ''),
+    -- Get app environment variables
+    ('p', 'group_user', '/project/:id/apps/:code/env', 'GET', '', '', ''),
+    -- Update app environment variables
+    ('p', 'group_user', '/project/:id/apps/:code/env', 'PUT', '', '', ''),
+    -- Delete a specific environment variable
+    ('p', 'group_user', '/project/:id/apps/:code/env/:name', 'DELETE', '', '', ''),
+    -- Update app port mappings
+    ('p', 'group_user', '/project/:id/apps/:code/ports', 'PUT', '', '', ''),
+    -- Update app domain settings
+    ('p', 'group_user', '/project/:id/apps/:code/domain', 'PUT', '', '', '')
+ON CONFLICT DO NOTHING;
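For context on how these rows take effect: the `:id`/`:code` segments are path patterns, matched against the concrete request URL at enforcement time (typically via `keyMatch2` in a RESTful Casbin model). A hedged sketch with the casbin crate, assuming that matcher is what Stacker's model uses:

```rust
// Sketch only: checking a concrete request against the policies above, assuming
// a RESTful Casbin model whose matcher uses keyMatch2 (implied by the ':id' syntax).
use casbin::{CoreApi, Enforcer};

fn user_can_update_env(enforcer: &Enforcer) -> casbin::Result<bool> {
    // keyMatch2 lets "/project/42/apps/web/env" satisfy "/project/:id/apps/:code/env"
    enforcer.enforce(("group_user", "/project/42/apps/web/env", "PUT"))
}
```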
diff --git a/migrations/20260209120000_casbin_root_to_group_admin.down.sql b/migrations/20260209120000_casbin_root_to_group_admin.down.sql
new file mode 100644
index 00000000..9b8721af
--- /dev/null
+++ b/migrations/20260209120000_casbin_root_to_group_admin.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM public.casbin_rule
+WHERE ptype = 'g' AND v0 = 'root' AND v1 = 'group_admin';
diff --git a/migrations/20260209120000_casbin_root_to_group_admin.up.sql b/migrations/20260209120000_casbin_root_to_group_admin.up.sql
new file mode 100644
index 00000000..edb0dda8
--- /dev/null
+++ b/migrations/20260209120000_casbin_root_to_group_admin.up.sql
@@ -0,0 +1,7 @@
+-- Map User Service 'root' role to stacker 'group_admin' role group
+-- User Service /me endpoint returns role="root" for admin users,
+-- but stacker Casbin policies use 'group_admin' for admin-level access.
+-- This grouping rule bridges the two role systems.
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('g', 'root', 'group_admin', '', '', '', '')
+ON CONFLICT DO NOTHING;
diff --git a/migrations/20260210130000_casbin_admin_template_detail.down.sql b/migrations/20260210130000_casbin_admin_template_detail.down.sql
new file mode 100644
index 00000000..6fd7a002
--- /dev/null
+++ b/migrations/20260210130000_casbin_admin_template_detail.down.sql
@@ -0,0 +1,5 @@
+-- Remove Casbin rules for admin template detail endpoint
+DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v0 = 'admin_service' AND v1 = '/api/admin/templates/:id' AND v2 = 'GET';
+DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v0 = 'group_admin' AND v1 = '/api/admin/templates/:id' AND v2 = 'GET';
+DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v0 = 'admin_service' AND v1 = '/stacker/admin/templates/:id' AND v2 = 'GET';
+DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v0 = 'group_admin' AND v1 = '/stacker/admin/templates/:id' AND v2 = 'GET';
diff --git a/migrations/20260210130000_casbin_admin_template_detail.up.sql b/migrations/20260210130000_casbin_admin_template_detail.up.sql
new file mode 100644
index 00000000..e3047c2b
--- /dev/null
+++ b/migrations/20260210130000_casbin_admin_template_detail.up.sql
@@ -0,0 +1,16 @@
+-- Add Casbin rules for admin template detail endpoint (GET /api/admin/templates/:id)
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'admin_service', '/api/admin/templates/:id', 'GET', '', '', '')
+ON CONFLICT DO NOTHING;
+
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'group_admin', '/api/admin/templates/:id', 'GET', '', '', '')
+ON CONFLICT DO NOTHING;
+
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'admin_service', '/stacker/admin/templates/:id', 'GET', '', '', '')
+ON CONFLICT DO NOTHING;
+
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'group_admin', '/stacker/admin/templates/:id', 'GET', '', '', '')
+ON CONFLICT DO NOTHING;
diff --git a/migrations/20260210140000_casbin_admin_security_scan.down.sql b/migrations/20260210140000_casbin_admin_security_scan.down.sql
new file mode 100644
index 00000000..aa4bbc97
--- /dev/null
+++ b/migrations/20260210140000_casbin_admin_security_scan.down.sql
@@ -0,0 +1,3 @@
+-- Remove Casbin rules for admin template security scan endpoint
+DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v1 = '/api/admin/templates/:id/security-scan' AND v2 = 'POST';
+DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v1 = '/stacker/admin/templates/:id/security-scan' AND v2 = 'POST';
diff --git a/migrations/20260210140000_casbin_admin_security_scan.up.sql b/migrations/20260210140000_casbin_admin_security_scan.up.sql
new file mode 100644
index 00000000..7f56ba57
--- /dev/null
+++ b/migrations/20260210140000_casbin_admin_security_scan.up.sql
@@ -0,0 +1,16 @@
+-- Add Casbin rules for admin template security scan endpoint
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'admin_service', '/api/admin/templates/:id/security-scan', 'POST', '', '', '')
+ON CONFLICT DO NOTHING;
+
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'group_admin', '/api/admin/templates/:id/security-scan', 'POST', '', '', '')
+ON CONFLICT DO NOTHING;
+
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'admin_service', '/stacker/admin/templates/:id/security-scan', 'POST', '', '', '')
+ON CONFLICT DO NOTHING;
+
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'group_admin', '/stacker/admin/templates/:id/security-scan', 'POST', '', '', '')
+ON CONFLICT DO NOTHING;
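Each admin policy above is inserted twice, once for `/api/...` and once for `/stacker/...`, because the path Casbin sees depends on whether the nginx proxy prefix has been stripped before enforcement. Duplicating rows keeps the authorization middleware simple; the alternative would be normalizing the path first, roughly as in this hypothetical helper (not part of this PR):

```rust
// Hypothetical alternative to duplicating every policy row: strip the proxy
// prefix before handing the path to the enforcer.
fn normalize_path(path: &str) -> &str {
    path.strip_prefix("/stacker").unwrap_or(path)
}

fn main() {
    assert_eq!(normalize_path("/stacker/admin/templates/7"), "/admin/templates/7");
    assert_eq!(normalize_path("/api/admin/templates/7"), "/api/admin/templates/7");
}
```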
diff --git a/migrations/20260210150000_casbin_resubmit_template.down.sql b/migrations/20260210150000_casbin_resubmit_template.down.sql
new file mode 100644
index 00000000..20f5010e
--- /dev/null
+++ b/migrations/20260210150000_casbin_resubmit_template.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v1 = '/api/templates/:id/resubmit' AND v2 = 'POST';
+DELETE FROM public.casbin_rule WHERE ptype = 'p' AND v1 = '/stacker/templates/:id/resubmit' AND v2 = 'POST';
diff --git a/migrations/20260210150000_casbin_resubmit_template.up.sql b/migrations/20260210150000_casbin_resubmit_template.up.sql
new file mode 100644
index 00000000..7d553d6c
--- /dev/null
+++ b/migrations/20260210150000_casbin_resubmit_template.up.sql
@@ -0,0 +1,25 @@
+-- Allow users and admins to resubmit templates with new versions
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'group_user', '/api/templates/:id/resubmit', 'POST', '', '', '')
+ON CONFLICT DO NOTHING;
+
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'group_admin', '/api/templates/:id/resubmit', 'POST', '', '', '')
+ON CONFLICT DO NOTHING;
+
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'admin_service', '/api/templates/:id/resubmit', 'POST', '', '', '')
+ON CONFLICT DO NOTHING;
+
+-- Also cover /stacker/ prefixed paths (nginx proxy)
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'group_user', '/stacker/templates/:id/resubmit', 'POST', '', '', '')
+ON CONFLICT DO NOTHING;
+
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'group_admin', '/stacker/templates/:id/resubmit', 'POST', '', '', '')
+ON CONFLICT DO NOTHING;
+
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'admin_service', '/stacker/templates/:id/resubmit', 'POST', '', '', '')
+ON CONFLICT DO NOTHING;
diff --git a/migrations/20260210160000_casbin_admin_unapprove.down.sql b/migrations/20260210160000_casbin_admin_unapprove.down.sql
new file mode 100644
index 00000000..d99ff278
--- /dev/null
+++ b/migrations/20260210160000_casbin_admin_unapprove.down.sql
@@ -0,0 +1,3 @@
+-- Remove Casbin rules for admin template unapprove endpoint
+DELETE FROM public.casbin_rule
+WHERE ptype = 'p' AND v1 = '/api/admin/templates/:id/unapprove' AND v2 = 'POST';
diff --git a/migrations/20260210160000_casbin_admin_unapprove.up.sql b/migrations/20260210160000_casbin_admin_unapprove.up.sql
new file mode 100644
index 00000000..6058b0b2
--- /dev/null
+++ b/migrations/20260210160000_casbin_admin_unapprove.up.sql
@@ -0,0 +1,12 @@
+-- Add Casbin rules for admin template unapprove endpoint
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'admin_service', '/api/admin/templates/:id/unapprove', 'POST', '', '', '')
+ON CONFLICT DO NOTHING;
+
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'group_admin', '/api/admin/templates/:id/unapprove', 'POST', '', '', '')
+ON CONFLICT DO NOTHING;
+
+INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5)
+VALUES ('p', 'root', '/api/admin/templates/:id/unapprove', 'POST', '', '', '')
+ON CONFLICT DO NOTHING;
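Taken together with the updated submit query earlier in the diff ('draft', 'rejected', 'needs_changes' to 'submitted'), the resubmit query ('rejected', 'needs_changes', 'approved' to 'submitted'), and the unapprove query ('approved' back to 'submitted', clearing approved_at), the status transitions these migrations guard look roughly like this. The enum is an illustrative summary; the service stores plain strings:

```rust
// Illustrative state machine for stack_template.status, distilled from the
// WHERE clauses in this diff; not a type that exists in the codebase.
#[derive(Debug, Clone, Copy, PartialEq)]
enum TemplateStatus {
    Draft,
    Submitted,
    Approved,
    Rejected,
    NeedsChanges,
}

fn can_submit(s: TemplateStatus) -> bool {
    use TemplateStatus::*;
    matches!(s, Draft | Rejected | NeedsChanges)
}

fn can_resubmit(s: TemplateStatus) -> bool {
    use TemplateStatus::*;
    // Resubmission additionally allows pulling an already-approved template back for review.
    matches!(s, Rejected | NeedsChanges | Approved)
}

fn can_unapprove(s: TemplateStatus) -> bool {
    // Unapprove moves 'approved' back to 'submitted' and clears approved_at.
    matches!(s, TemplateStatus::Approved)
}
```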
diff --git a/migrations/20260211100000_add_pricing_to_stack_template.down.sql b/migrations/20260211100000_add_pricing_to_stack_template.down.sql
new file mode 100644
index 00000000..72351e9e
--- /dev/null
+++ b/migrations/20260211100000_add_pricing_to_stack_template.down.sql
@@ -0,0 +1,3 @@
+ALTER TABLE stack_template DROP COLUMN IF EXISTS price;
+ALTER TABLE stack_template DROP COLUMN IF EXISTS billing_cycle;
+ALTER TABLE stack_template DROP COLUMN IF EXISTS currency;
diff --git a/migrations/20260211100000_add_pricing_to_stack_template.up.sql b/migrations/20260211100000_add_pricing_to_stack_template.up.sql
new file mode 100644
index 00000000..78044280
--- /dev/null
+++ b/migrations/20260211100000_add_pricing_to_stack_template.up.sql
@@ -0,0 +1,5 @@
+-- Add pricing columns to stack_template
+-- Creator sets price during template submission; webhook sends it to User Service products table
+ALTER TABLE stack_template ADD COLUMN IF NOT EXISTS price DOUBLE PRECISION DEFAULT 0;
+ALTER TABLE stack_template ADD COLUMN IF NOT EXISTS billing_cycle VARCHAR(50) DEFAULT 'free';
+ALTER TABLE stack_template ADD COLUMN IF NOT EXISTS currency VARCHAR(3) DEFAULT 'USD';
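On the Rust side these columns surface in the regenerated sqlx metadata above as nullable `Float8`/`Varchar`, which sqlx maps to `Option<f64>` and `Option<String>`. An illustrative row subset (field names match the columns; the struct itself is not from this PR):

```rust
// Illustrative mapping of the new pricing columns; sqlx maps DOUBLE PRECISION
// to f64 and VARCHAR to String, with nullable columns becoming Option<_>.
#[derive(Debug, sqlx::FromRow)]
struct TemplatePricing {
    price: Option<f64>,            // DEFAULT 0
    billing_cycle: Option<String>, // DEFAULT 'free'
    currency: Option<String>,      // DEFAULT 'USD' (3-letter code, hence VARCHAR(3))
}
```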
+
+-- Add new constraint: unique per (project_id, deployment_id, code)
+-- Use a partial unique index to handle NULL deployment_id (legacy rows)
+CREATE UNIQUE INDEX IF NOT EXISTS unique_project_app_deployment_code
+    ON project_app (project_id, deployment_id, code)
+    WHERE deployment_id IS NOT NULL;
+
+-- Keep backward compatibility: unique per (project_id, code) when deployment_id IS NULL
+CREATE UNIQUE INDEX IF NOT EXISTS unique_project_app_code_legacy
+    ON project_app (project_id, code)
+    WHERE deployment_id IS NULL;
+
+COMMENT ON COLUMN project_app.deployment_id IS 'Deployment this app belongs to. NULL for legacy apps created before deployment scoping.';
diff --git a/migrations/20260213100000_add_cloud_id_to_server.down.sql b/migrations/20260213100000_add_cloud_id_to_server.down.sql
new file mode 100644
index 00000000..1f184b0e
--- /dev/null
+++ b/migrations/20260213100000_add_cloud_id_to_server.down.sql
@@ -0,0 +1,3 @@
+-- Remove cloud_id from server table
+DROP INDEX IF EXISTS idx_server_cloud_id;
+ALTER TABLE server DROP COLUMN IF EXISTS cloud_id;
diff --git a/migrations/20260213100000_add_cloud_id_to_server.up.sql b/migrations/20260213100000_add_cloud_id_to_server.up.sql
new file mode 100644
index 00000000..986758bf
--- /dev/null
+++ b/migrations/20260213100000_add_cloud_id_to_server.up.sql
@@ -0,0 +1,8 @@
+-- Add cloud_id back to server table to track which cloud provider the server belongs to
+-- This allows displaying the provider name in the UI and knowing which cloud API to use
+
+ALTER TABLE server ADD COLUMN cloud_id INTEGER REFERENCES cloud(id) ON DELETE SET NULL;
+
+CREATE INDEX idx_server_cloud_id ON server(cloud_id);
+
+COMMENT ON COLUMN server.cloud_id IS 'Reference to the cloud provider (DO, Hetzner, AWS, etc.) this server belongs to';
diff --git a/src/connectors/user_service/app.rs b/src/connectors/user_service/app.rs
index 14dfde7f..ae83ed51 100644
--- a/src/connectors/user_service/app.rs
+++ b/src/connectors/user_service/app.rs
@@ -15,12 +15,18 @@ pub struct Application {
     pub category: Option<String>,
     pub docker_image: Option<String>,
     pub default_port: Option<i32>,
-}
-
-// Wrapper types for Eve-style responses
-#[derive(Debug, Deserialize)]
-struct ApplicationsResponse {
-    _items: Vec<Application>,
+    /// Ansible role name for template rendering
+    #[serde(default)]
+    pub role: Option<String>,
+    /// Default environment variables from app_var table
+    #[serde(default)]
+    pub default_env: Option<serde_json::Value>,
+    /// Default ports configuration from app table
+    #[serde(default)]
+    pub default_ports: Option<serde_json::Value>,
+    /// Default config file templates from app_var (with attachment_path)
+    #[serde(default)]
+    pub default_config_files: Option<serde_json::Value>,
 }
 
 impl UserServiceClient {
@@ -30,7 +36,11 @@ impl UserServiceClient {
         bearer_token: &str,
         query: Option<&str>,
     ) -> Result<Vec<Application>, ConnectorError> {
-        let url = format!("{}/applications", self.base_url);
+        let mut url = format!("{}/catalog?kind=app", self.base_url);
+        if let Some(q) = query {
+            url.push_str("&q=");
+            url.push_str(&urlencoding::encode(q));
+        }
 
         let response = self
             .http_client
@@ -53,12 +63,21 @@ impl UserServiceClient {
             )));
         }
 
-        // User Service returns { "_items": [...], "_meta": {...} }
-        let wrapper: ApplicationsResponse = response
+        let wrapper: serde_json::Value = response
             .json()
             .await
             .map_err(|e| ConnectorError::InvalidResponse(e.to_string()))?;
-        let mut apps = wrapper._items;
+
+        let items = wrapper
+            .get("_items")
+            .and_then(|v| v.as_array())
+            .cloned()
+            .unwrap_or_default();
+
+        let mut apps: Vec<Application> = items
+            .into_iter()
+            .filter_map(application_from_catalog)
+            .collect();
 
         if let Some(q) = query {
             let q = q.to_lowercase();
@@ -71,4 +90,129 @@ impl UserServiceClient {
 
         Ok(apps)
     }
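The new Application fields above are all optional, so older User Service payloads keep deserializing; the explicit #[serde(default)] just makes that intent visible. A standalone sketch of the behavior (struct and field names here are illustrative, not the crate's actual types):

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    struct CatalogApp {
        name: Option<String>,
        #[serde(default)]
        role: Option<String>,
        #[serde(default)]
        default_env: Option<serde_json::Value>,
    }

    fn main() {
        // Old payload without the enriched fields: still parses, fields are None.
        let old = r#"{"name": "redis"}"#;
        let app: CatalogApp = serde_json::from_str(old).expect("old payload parses");
        assert_eq!(app.role, None);

        // New payload with enrichment.
        let new = r#"{"name": "redis", "role": "redis", "default_env": {"PORT": "6379"}}"#;
        let app: CatalogApp = serde_json::from_str(new).expect("new payload parses");
        assert_eq!(app.role.as_deref(), Some("redis"));
    }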
+
+    /// Fetch enriched app catalog data from /applications/catalog endpoint.
+    /// Returns apps with correct Docker images and default env/config from app + app_var tables.
+    /// Falls back to search_applications() if the catalog endpoint is not available.
+    pub async fn fetch_app_catalog(
+        &self,
+        bearer_token: &str,
+        code: &str,
+    ) -> Result<Option<Application>, ConnectorError> {
+        let url = format!(
+            "{}/applications/catalog/{}",
+            self.base_url,
+            urlencoding::encode(code)
+        );
+
+        tracing::info!("Fetching app catalog for code={} from {}", code, url);
+
+        let response = match self
+            .http_client
+            .get(&url)
+            .header("Authorization", format!("Bearer {}", bearer_token))
+            .send()
+            .await
+        {
+            Ok(resp) => resp,
+            Err(e) => {
+                tracing::warn!(
+                    "Catalog endpoint transport error for code={}: {}, falling back to search_applications",
+                    code, e
+                );
+                return self.fallback_search_by_code(bearer_token, code).await;
+            }
+        };
+
+        if response.status() == StatusCode::NOT_FOUND {
+            tracing::info!(
+                "Catalog endpoint returned 404 for code={}, falling back to search_applications",
+                code
+            );
+            return self.fallback_search_by_code(bearer_token, code).await;
+        }
+
+        if !response.status().is_success() {
+            let status = response.status().as_u16();
+            let body = response.text().await.unwrap_or_default();
+            tracing::warn!(
+                "Catalog endpoint error ({}) for code={}: {}, falling back to search_applications",
+                status, code, body
+            );
+            return self.fallback_search_by_code(bearer_token, code).await;
+        }
+
+        match response.json::<Application>().await {
+            Ok(app) => Ok(Some(app)),
+            Err(e) => {
+                tracing::warn!(
+                    "Catalog endpoint response parse error for code={}: {}, falling back to search_applications",
+                    code, e
+                );
+                self.fallback_search_by_code(bearer_token, code).await
+            }
+        }
+    }
+
+    /// Helper: fall back to search_applications and find by exact code match.
+    async fn fallback_search_by_code(
+        &self,
+        bearer_token: &str,
+        code: &str,
+    ) -> Result<Option<Application>, ConnectorError> {
+        let apps = self.search_applications(bearer_token, Some(code)).await?;
+        let code_lower = code.to_lowercase();
+        Ok(apps.into_iter().find(|app| {
+            app.code
+                .as_deref()
+                .map(|c| c.to_lowercase() == code_lower)
+                .unwrap_or(false)
+        }))
+    }
+}
+
+fn application_from_catalog(item: serde_json::Value) -> Option<Application> {
+    let kind = item.get("kind").and_then(|v| v.as_str()).unwrap_or("");
+    if kind != "app" {
+        return None;
+    }
+
+    let id = item.get("_id").and_then(|v| v.as_i64());
+    let name = item
+        .get("name")
+        .and_then(|v| v.as_str())
+        .map(|s| s.to_string());
+    let code = item
+        .get("code")
+        .and_then(|v| v.as_str())
+        .map(|s| s.to_string());
+    let description = item
+        .get("description")
+        .and_then(|v| v.as_str())
+        .map(|s| s.to_string());
+    let category = item
+        .get("categories")
+        .and_then(|v| v.as_array())
+        .and_then(|arr| arr.first())
+        .and_then(|v| v.as_str())
+        .map(|s| s.to_string())
+        .or_else(|| {
+            item.get("app_type")
+                .and_then(|v| v.as_str())
+                .map(|s| s.to_string())
+        });
+
+    Some(Application {
+        id,
+        name,
+        code,
+        description,
+        category,
+        docker_image: None,
+        default_port: None,
+        role: None,
+        default_env: None,
+        default_ports: None,
+        default_config_files: None,
+    })
+}
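The catalog parsing above unwraps an Eve-style {"_items": [...]} envelope and keeps only kind == "app" rows. A minimal self-contained sketch of the same technique, with illustrative names:

    fn names_of_apps(body: &str) -> Vec<String> {
        let wrapper: serde_json::Value = serde_json::from_str(body).unwrap_or_default();
        wrapper
            .get("_items")
            .and_then(|v| v.as_array())
            .cloned()
            .unwrap_or_default()
            .into_iter()
            .filter_map(|item| {
                // Skip non-app rows, mirroring the kind == "app" guard.
                if item.get("kind").and_then(|v| v.as_str()) != Some("app") {
                    return None;
                }
                item.get("name").and_then(|v| v.as_str()).map(str::to_string)
            })
            .collect()
    }

    fn main() {
        let body = r#"{"_items":[{"kind":"app","name":"redis"},{"kind":"category","name":"Databases"}]}"#;
        assert_eq!(names_of_apps(body), vec!["redis".to_string()]);
    }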
diff --git a/src/connectors/user_service/category_sync.rs b/src/connectors/user_service/category_sync.rs
index 29363424..e0d713d5 100644
--- a/src/connectors/user_service/category_sync.rs
+++ b/src/connectors/user_service/category_sync.rs
@@ -7,7 +7,6 @@ use std::sync::Arc;
 use tracing::Instrument;
 
 use super::{CategoryInfo, UserServiceConnector};
-use crate::connectors::ConnectorError;
 
 /// Sync categories from User Service to local database
 ///
diff --git a/src/connectors/user_service/client.rs b/src/connectors/user_service/client.rs
index f81e6c62..70d808f2 100644
--- a/src/connectors/user_service/client.rs
+++ b/src/connectors/user_service/client.rs
@@ -25,8 +25,6 @@ impl UserServiceClient {
         let timeout = std::time::Duration::from_secs(config.timeout_secs);
         let http_client = reqwest::Client::builder()
             .timeout(timeout)
-            .http1_only() // Force HTTP/1.1 since uwsgi might not handle HTTP/2 well
-            .pool_max_idle_per_host(0) // Disable connection pooling to prevent stale connections
             .build()
             .expect("Failed to create HTTP client");
 
@@ -414,28 +412,14 @@ impl UserServiceConnector for UserServiceClient {
             template_id = stack_template_id
         );
 
-        // Build "where" filter as JSON and let reqwest handle URL encoding
-        #[derive(Serialize)]
-        struct WhereFilter<'a> {
-            external_id: i32,
-            product_type: &'a str,
-        }
-
-        let where_filter = WhereFilter {
-            external_id: stack_template_id,
-            product_type: "template",
-        };
-
-        let where_json = serde_json::to_string(&where_filter).map_err(|e| {
-            ConnectorError::HttpError(format!(
-                "Failed to serialize where filter for template product: {}",
-                e
-            ))
-        })?;
+        // Query /api/1.0/products?external_id={template_id}&product_type=template
+        let url = format!(
+            "{}/api/1.0/products?where={{\"external_id\":{},\"product_type\":\"template\"}}",
+            self.base_url, stack_template_id
+        );
 
-        let url = format!("{}/api/1.0/products", self.base_url);
+        let mut req = self.http_client.get(&url);
 
-        let mut req = self.http_client.get(&url).query(&[("where", &where_json)]);
         if let Some(auth) = self.auth_header() {
             req = req.header("Authorization", auth);
         }
diff --git a/src/connectors/user_service/deployment_validator.rs b/src/connectors/user_service/deployment_validator.rs
index ecbfe027..77b93770 100644
--- a/src/connectors/user_service/deployment_validator.rs
+++ b/src/connectors/user_service/deployment_validator.rs
@@ -6,7 +6,7 @@
 use std::sync::Arc;
 use tracing::Instrument;
 
-use crate::connectors::{ConnectorError, UserServiceConnector};
+use crate::connectors::UserServiceConnector;
 use crate::models;
 
 /// Custom error types for deployment validation
diff --git a/src/connectors/user_service/install.rs b/src/connectors/user_service/install.rs
index b58a6ed9..4b9edebe 100644
--- a/src/connectors/user_service/install.rs
+++ b/src/connectors/user_service/install.rs
@@ -55,7 +55,7 @@ impl UserServiceClient {
         &self,
         bearer_token: &str,
     ) -> Result<Vec<Installation>, ConnectorError> {
-        let url = format!("{}/installations", self.base_url);
+        let url = format!("{}/api/1.0/installations", self.base_url);
 
         let response = self
             .http_client
@@ -89,7 +89,7 @@ impl UserServiceClient {
         bearer_token: &str,
         installation_id: i64,
     ) -> Result<Installation, ConnectorError> {
-        let url = format!("{}/installations/{}", self.base_url, installation_id);
+        let url = format!("{}/api/1.0/installations/{}", self.base_url, installation_id);
 
         let response = self
             .http_client
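The rewritten products query leans on format!'s brace escaping: doubled braces emit literal { and } while single braces interpolate. A small check of the resulting URL shape (base URL and id are placeholders; note that format! does not percent-encode the JSON value):

    fn main() {
        let base_url = "http://user:4100";
        let stack_template_id = 7;
        let url = format!(
            "{}/api/1.0/products?where={{\"external_id\":{},\"product_type\":\"template\"}}",
            base_url, stack_template_id
        );
        assert_eq!(
            url,
            r#"http://user:4100/api/1.0/products?where={"external_id":7,"product_type":"template"}"#
        );
    }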
diff --git a/src/connectors/user_service/marketplace_webhook.rs b/src/connectors/user_service/marketplace_webhook.rs
index 780f23c8..ef2a7e17 100644
--- a/src/connectors/user_service/marketplace_webhook.rs
+++ b/src/connectors/user_service/marketplace_webhook.rs
@@ -36,10 +36,10 @@ pub struct MarketplaceWebhookPayload {
     /// Template description
     pub description: Option<String>,
 
-    /// Price in specified currency (if not free)
+    /// Price in specified currency (set by creator during submission)
     pub price: Option<f64>,
 
-    /// Billing cycle: "one_time" or "monthly"/"yearly"
+    /// Billing cycle: "free", "one_time", or "subscription"
     #[serde(skip_serializing_if = "Option::is_none")]
     pub billing_cycle: Option<String>,
@@ -50,7 +50,7 @@ pub struct MarketplaceWebhookPayload {
     /// Creator/vendor user ID from Stacker
     pub vendor_user_id: Option<String>,
 
-    /// Vendor name or email
+    /// Vendor display name (creator_name from template)
     pub vendor_name: Option<String>,
 
     /// Category of template
@@ -60,6 +60,34 @@ pub struct MarketplaceWebhookPayload {
     /// Tags/keywords
     #[serde(skip_serializing_if = "Option::is_none")]
     pub tags: Option<serde_json::Value>,
+
+    /// Full description (long_description from template)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub long_description: Option<String>,
+
+    /// Tech stack metadata (JSON object of services/apps)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub tech_stack: Option<serde_json::Value>,
+
+    /// Creator display name
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub creator_name: Option<String>,
+
+    /// Total deployments count
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub deploy_count: Option<i32>,
+
+    /// Total views count
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub view_count: Option<i32>,
+
+    /// When the template was approved
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub approved_at: Option<String>,
+
+    /// Minimum plan required to deploy
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub required_plan_name: Option<String>,
 }
 
 /// Response from User Service webhook endpoint
@@ -90,8 +118,9 @@ impl WebhookSenderConfig {
     /// Create from environment variables
     pub fn from_env() -> Result<Self, String> {
         let base_url = std::env::var("URL_SERVER_USER")
+            .or_else(|_| std::env::var("USER_SERVICE_URL"))
             .or_else(|_| std::env::var("USER_SERVICE_BASE_URL"))
-            .map_err(|_| "USER_SERVICE_BASE_URL not configured".to_string())?;
+            .map_err(|_| "USER_SERVICE_URL not configured".to_string())?;
 
         let bearer_token = std::env::var("STACKER_SERVICE_TOKEN")
             .map_err(|_| "STACKER_SERVICE_TOKEN not configured".to_string())?;
@@ -159,17 +188,28 @@ impl MarketplaceWebhookSender {
                 .short_description
                 .clone()
                 .or_else(|| template.long_description.clone()),
-            price: None, // Pricing not stored in Stacker (User Service responsibility)
-            billing_cycle: None,
-            currency: None,
+            price: template.price,
+            billing_cycle: template.billing_cycle.clone(),
+            currency: template.currency.clone(),
             vendor_user_id: Some(vendor_id.to_string()),
-            vendor_name: Some(vendor_id.to_string()),
+            vendor_name: template.creator_name.clone(),
             category: category_code,
             tags: if let serde_json::Value::Array(_) = template.tags {
                 Some(template.tags.clone())
            } else {
                 None
             },
+            long_description: template.long_description.clone(),
+            tech_stack: if template.tech_stack != serde_json::json!({}) {
+                Some(template.tech_stack.clone())
+            } else {
+                None
+            },
+            creator_name: template.creator_name.clone(),
+            deploy_count: template.deploy_count,
+            view_count: template.view_count,
+            approved_at: template.approved_at.map(|dt| dt.to_rfc3339()),
+            required_plan_name: template.required_plan_name.clone(),
         };
 
         self.send_webhook(&payload).instrument(span).await
@@ -198,17 +238,28 @@ impl MarketplaceWebhookSender {
                 .short_description
                 .clone()
                 .or_else(|| template.long_description.clone()),
-            price: None,
-            billing_cycle: None,
-            currency: None,
+            price: template.price,
+            billing_cycle: template.billing_cycle.clone(),
+            currency: template.currency.clone(),
             vendor_user_id: Some(vendor_id.to_string()),
-            vendor_name: Some(vendor_id.to_string()),
+            vendor_name: template.creator_name.clone(),
             category: category_code,
             tags: if let serde_json::Value::Array(_) = template.tags {
                 Some(template.tags.clone())
             } else {
                 None
             },
+            long_description: template.long_description.clone(),
+            tech_stack: if template.tech_stack != serde_json::json!({}) {
+                Some(template.tech_stack.clone())
+            } else {
+                None
+            },
+            creator_name: template.creator_name.clone(),
+            deploy_count: template.deploy_count,
+            view_count: template.view_count,
+            approved_at: template.approved_at.map(|dt| dt.to_rfc3339()),
+            required_plan_name: template.required_plan_name.clone(),
         };
 
         self.send_webhook(&payload).instrument(span).await
@@ -239,6 +290,13 @@ impl MarketplaceWebhookSender {
             vendor_name: None,
             category: None,
             tags: None,
+            long_description: None,
+            tech_stack: None,
+            creator_name: None,
+            deploy_count: None,
+            view_count: None,
+            approved_at: None,
+            required_plan_name: None,
         };
 
         self.send_webhook(&payload).instrument(span).await
@@ -357,6 +415,13 @@ mod tests {
             vendor_name: Some("alice@example.com".to_string()),
             category: Some("AI Agents".to_string()),
             tags: Some(serde_json::json!(["ai", "agents"])),
+            long_description: None,
+            tech_stack: None,
+            creator_name: None,
+            deploy_count: None,
+            view_count: None,
+            approved_at: None,
+            required_plan_name: None,
         };
 
         let json = serde_json::to_string(&payload).expect("Failed to serialize");
@@ -385,6 +450,13 @@ mod tests {
             vendor_name: None,
             category: None,
             tags: None,
+            long_description: None,
+            tech_stack: None,
+            creator_name: None,
+            deploy_count: None,
+            view_count: None,
+            approved_at: None,
+            required_plan_name: None,
         };
 
         let json = serde_json::to_string(&payload).expect("Failed to serialize");
@@ -409,6 +481,13 @@ mod tests {
             vendor_name: Some("vendor@example.com".to_string()),
             category: Some("CMS".to_string()),
             tags: Some(serde_json::json!(["cms", "wordpress"])),
+            long_description: None,
+            tech_stack: None,
+            creator_name: None,
+            deploy_count: None,
+            view_count: None,
+            approved_at: None,
+            required_plan_name: None,
         };
 
         assert_eq!(payload.action, "template_approved");
@@ -433,6 +512,13 @@ mod tests {
             vendor_name: Some("vendor@example.com".to_string()),
             category: Some("CMS".to_string()),
             tags: Some(serde_json::json!(["cms", "wordpress", "v2"])),
+            long_description: None,
+            tech_stack: None,
+            creator_name: None,
+            deploy_count: None,
+            view_count: None,
+            approved_at: None,
+            required_plan_name: None,
         };
 
         assert_eq!(payload.action, "template_updated");
@@ -457,6 +543,13 @@ mod tests {
             vendor_name: None,
             category: Some("CMS".to_string()),
             tags: Some(serde_json::json!(["blog", "free"])),
+            long_description: None,
+            tech_stack: None,
+            creator_name: None,
+            deploy_count: None,
+            view_count: None,
+            approved_at: None,
+            required_plan_name: None,
         };
 
         assert_eq!(payload.action, "template_approved");
@@ -545,6 +638,13 @@ mod tests {
             vendor_name: Some("John Doe".to_string()),
             category: Some("Enterprise".to_string()),
             tags: Some(serde_json::json!(["enterprise", "complex", "saas"])),
+            long_description: Some("Full enterprise description".to_string()),
+            tech_stack: Some(serde_json::json!({"nginx": "1.25", "postgres": "16"})),
+            creator_name: Some("John Doe".to_string()),
+            deploy_count: Some(42),
+            view_count: Some(1337),
+            approved_at: Some("2026-02-11T10:00:00Z".to_string()),
+            required_plan_name: Some("starter".to_string()),
         };
 
         // Verify all fields are accessible
@@ -571,6 +671,13 @@ mod tests {
             vendor_name: None,
             category: None,
             tags: None,
+            long_description: None,
+            tech_stack: None,
+            creator_name: None,
+            deploy_count: None,
+            view_count: None,
+            approved_at: None,
+            required_plan_name: None,
         };
 
         // Should serialize without errors even with all optional fields as None
diff --git a/src/connectors/user_service/stack.rs b/src/connectors/user_service/stack.rs
index c70cc807..484df048 100644
--- a/src/connectors/user_service/stack.rs
+++ b/src/connectors/user_service/stack.rs
@@ -23,20 +23,22 @@ impl UserServiceClient {
         query: Option<&str>,
     ) -> Result<Vec<Application>, ConnectorError> {
         let url = format!("{}/stack_view", self.base_url);
-        
+
         tracing::info!("Fetching stack_view from {}", url);
         let start = std::time::Instant::now();
-        
+
         // Create a dedicated client for stack_view with longer timeout (30s for large response)
         // and explicit connection settings to avoid connection reuse issues
         let client = reqwest::Client::builder()
             .timeout(std::time::Duration::from_secs(30))
             .connect_timeout(std::time::Duration::from_secs(10))
             .http1_only()
-            .pool_max_idle_per_host(0) // Don't reuse connections
+            .pool_max_idle_per_host(0) // Don't reuse connections
             .build()
-            .map_err(|e| ConnectorError::Internal(format!("Failed to create HTTP client: {}", e)))?;
-        
+            .map_err(|e| {
+                ConnectorError::Internal(format!("Failed to create HTTP client: {}", e))
+            })?;
+
         let response = client
             .get(&url)
             .header("Authorization", format!("Bearer {}", bearer_token))
@@ -48,28 +50,38 @@ impl UserServiceClient {
             })?;
 
         let status = response.status();
-        tracing::info!("stack_view responded with status {} in {:?}", status, start.elapsed());
+        tracing::info!(
+            "stack_view responded with status {} in {:?}",
+            status,
+            start.elapsed()
+        );
 
         if !status.is_success() {
             let body = response.text().await.unwrap_or_default();
             return Err(ConnectorError::HttpError(format!(
                 "User Service error ({}): {}",
-                status.as_u16(), body
+                status.as_u16(),
+                body
             )));
         }
 
         tracing::info!("Reading stack_view JSON body...");
         let json_start = std::time::Instant::now();
-        
-        let wrapper: StackViewResponse = response
-            .json()
-            .await
-            .map_err(|e| {
-                tracing::error!("Failed to parse stack_view JSON after {:?}: {:?}", json_start.elapsed(), e);
-                ConnectorError::InvalidResponse(e.to_string())
-            })?;
 
-        tracing::info!("Parsed stack_view with {} items in {:?}", wrapper._items.len(), json_start.elapsed());
+        let wrapper: StackViewResponse = response.json().await.map_err(|e| {
+            tracing::error!(
+                "Failed to parse stack_view JSON after {:?}: {:?}",
+                json_start.elapsed(),
+                e
+            );
+            ConnectorError::InvalidResponse(e.to_string())
+        })?;
+
+        tracing::info!(
+            "Parsed stack_view with {} items in {:?}",
+            wrapper._items.len(),
+            json_start.elapsed()
+        );
 
         let mut apps: Vec<Application> = wrapper
             ._items
@@ -144,5 +156,9 @@ pub(crate) fn application_from_stack_view(item: StackViewItem) -> Application {
         category,
         docker_image,
         default_port,
+        role: None,
+        default_env: None,
+        default_ports: None,
+        default_config_files: None,
     }
 }
diff --git a/src/console/commands/mq/listener.rs b/src/console/commands/mq/listener.rs
index ad95f874..ca0556ad 100644
--- a/src/console/commands/mq/listener.rs
+++ b/src/console/commands/mq/listener.rs
@@ -10,16 +10,50 @@ use lapin::options::{BasicAckOptions, BasicConsumeOptions};
 use lapin::types::FieldTable;
 use serde_derive::{Deserialize, Serialize};
 use sqlx::PgPool;
+use std::time::Duration;
+use tokio::time::sleep;
 
 pub struct ListenCommand {}
 
+use serde_json::Value;
+
+fn string_or_number<'de, D>(deserializer: D) -> Result<String, D::Error>
+where
+    D: serde::Deserializer<'de>,
+{
+    let v: Value = serde::Deserialize::deserialize(deserializer)?;
+    match v {
+        Value::String(s) => Ok(s),
+        Value::Number(n) => Ok(n.to_string()),
+        _ => Err(serde::de::Error::custom("expected string or number")),
+    }
+}
+
+fn optional_string_or_number<'de, D>(deserializer: D) -> Result<Option<String>, D::Error>
+where
+    D: serde::Deserializer<'de>,
+{
+    let v: Option<Value> = serde::Deserialize::deserialize(deserializer)?;
+    match v {
+        Some(Value::String(s)) => Ok(Some(s)),
+        Some(Value::Number(n)) => Ok(Some(n.to_string())),
+        Some(Value::Null) | None => Ok(None),
+        _ => Err(serde::de::Error::custom("expected string, number, or null")),
+    }
+}
+
 #[derive(Serialize, Deserialize, Debug)]
 struct ProgressMessage {
+    #[serde(deserialize_with = "string_or_number")]
     id: String,
+    #[serde(default, deserialize_with = "optional_string_or_number")]
     deploy_id: Option<String>,
+    #[serde(default)]
+    deployment_hash: Option<String>,
     alert: i32,
     message: String,
     status: String,
+    #[serde(deserialize_with = "string_or_number")]
     progress: String,
 }
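ProgressMessage accepts ids sent either as JSON numbers or strings by routing them through serde_json::Value. The same trick in a runnable standalone form (struct name illustrative):

    use serde::{Deserialize, Deserializer};
    use serde_json::Value;

    fn string_or_number<'de, D>(deserializer: D) -> Result<String, D::Error>
    where
        D: Deserializer<'de>,
    {
        match Value::deserialize(deserializer)? {
            Value::String(s) => Ok(s),
            Value::Number(n) => Ok(n.to_string()),
            _ => Err(serde::de::Error::custom("expected string or number")),
        }
    }

    #[derive(Debug, Deserialize)]
    struct Msg {
        #[serde(deserialize_with = "string_or_number")]
        id: String,
    }

    fn main() {
        // Both shapes parse to the same normalized value.
        let a: Msg = serde_json::from_str(r#"{"id": 42}"#).unwrap();
        let b: Msg = serde_json::from_str(r#"{"id": "42"}"#).unwrap();
        assert_eq!(a.id, b.id);
    }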
@@ -38,82 +72,173 @@ impl crate::console::commands::CallableTrait for ListenCommand {
             .expect("Failed to connect to database.");
 
         let db_pool = web::Data::new(db_pool);
-
-        println!("Declare exchange");
-        let mq_manager = MqManager::try_new(settings.amqp.connection_string())?;
         let queue_name = "stacker_listener";
-        // let queue_name = "install_progress_m383emvfP9zQKs8lkgSU_Q";
-        // let queue_name = "install_progress_hy181TZa4DaabUZWklsrxw";
-        let consumer_channel = mq_manager
-            .consume("install_progress", queue_name, "install.progress.*.*.*")
-            .await?;
-
-        println!("Declare queue");
-        let mut consumer = consumer_channel
-            .basic_consume(
-                queue_name,
-                "console_listener",
-                BasicConsumeOptions::default(),
-                FieldTable::default(),
-            )
-            .await
-            .expect("Basic consume");
+
+        // Outer loop for reconnection on connection errors
+        loop {
+            println!("Connecting to RabbitMQ...");
+
+            // Try to establish connection with retry
+            let mq_manager = match Self::connect_with_retry(&settings.amqp.connection_string()).await {
+                Ok(m) => m,
+                Err(e) => {
+                    eprintln!("Failed to connect to RabbitMQ after retries: {}", e);
+                    sleep(Duration::from_secs(5)).await;
+                    continue;
+                }
+            };
+
+            let consumer_channel = match mq_manager
+                .consume("install_progress", queue_name, "install.progress.*.*.*")
+                .await
+            {
+                Ok(c) => c,
+                Err(e) => {
+                    eprintln!("Failed to create consumer: {}", e);
+                    sleep(Duration::from_secs(5)).await;
+                    continue;
+                }
+            };
 
-        println!("Waiting for messages ..");
-        while let Some(delivery) = consumer.next().await {
-            // println!("checking messages delivery {:?}", delivery);
-            let delivery = delivery.expect("error in consumer");
-            let s: String = match String::from_utf8(delivery.data.to_owned()) {
-                //delivery.data is of type Vec<u8>
-                Ok(v) => v,
-                Err(e) => panic!("Invalid UTF-8 sequence: {}", e),
+            println!("Declare queue");
+            let mut consumer = match consumer_channel
+                .basic_consume(
+                    queue_name,
+                    "console_listener",
+                    BasicConsumeOptions::default(),
+                    FieldTable::default(),
+                )
+                .await
+            {
+                Ok(c) => c,
+                Err(e) => {
+                    eprintln!("Failed basic_consume: {}", e);
+                    sleep(Duration::from_secs(5)).await;
+                    continue;
+                }
             };
 
+            println!("Waiting for messages ..");
+
+            // Inner loop for processing messages
+            while let Some(delivery_result) = consumer.next().await {
+                let delivery = match delivery_result {
+                    Ok(d) => d,
+                    Err(e) => {
+                        eprintln!("Consumer error (will reconnect): {}", e);
+                        break; // Break inner loop to reconnect
+                    }
+                };
+
+                let s: String = match String::from_utf8(delivery.data.to_owned()) {
+                    Ok(v) => v,
+                    Err(e) => {
+                        eprintln!("Invalid UTF-8 sequence: {}", e);
+                        if let Err(ack_err) = delivery.ack(BasicAckOptions::default()).await {
+                            eprintln!("Failed to ack invalid message: {}", ack_err);
+                        }
+                        continue;
+                    }
+                };
{}", ack_err); + } + continue; + } + }; - if statuses.contains(&(msg.status.as_ref())) && msg.deploy_id.is_some() { - println!("Update DB on status change .."); - let id = msg - .deploy_id - .unwrap() - .parse::() - .map_err(|_err| "Could not parse deployment id".to_string())?; + let statuses = vec![ + "completed", + "paused", + "failed", + "in_progress", + "error", + "wait_resume", + "wait_start", + "confirmed", + ]; + + match serde_json::from_str::(&s) { + Ok(msg) => { + println!("message {:?}", s); - match deployment::fetch(db_pool.get_ref(), id).await? { - Some(mut row) => { - row.status = msg.status; - row.updated_at = Utc::now(); - println!( - "Deployment {} updated with status {}", - &id, &row.status - ); - deployment::update(db_pool.get_ref(), row).await?; + if statuses.contains(&(msg.status.as_ref())) { + // Try to find deployment by deploy_id or deployment_hash + let deployment_result = if let Some(ref deploy_id_str) = msg.deploy_id { + // Try deploy_id first (numeric ID) + if let Ok(id) = deploy_id_str.parse::() { + deployment::fetch(db_pool.get_ref(), id).await + } else if let Some(ref hash) = msg.deployment_hash { + // deploy_id might be the hash string + deployment::fetch_by_deployment_hash(db_pool.get_ref(), hash).await + } else { + // Try deploy_id as hash + deployment::fetch_by_deployment_hash(db_pool.get_ref(), deploy_id_str).await + } + } else if let Some(ref hash) = msg.deployment_hash { + // Use deployment_hash + deployment::fetch_by_deployment_hash(db_pool.get_ref(), hash).await + } else { + // No identifier available + println!("No deploy_id or deployment_hash in message"); + if let Err(ack_err) = delivery.ack(BasicAckOptions::default()).await { + eprintln!("Failed to ack: {}", ack_err); + } + continue; + }; + + match deployment_result { + Ok(Some(mut row)) => { + row.status = msg.status; + row.updated_at = Utc::now(); + println!( + "Deployment {} updated with status {}", + &row.id, &row.status + ); + if let Err(e) = deployment::update(db_pool.get_ref(), row).await { + eprintln!("Failed to update deployment: {}", e); + } + } + Ok(None) => println!("Deployment record was not found in db"), + Err(e) => eprintln!("Failed to fetch deployment: {}", e), } - None => println!("Deployment record was not found in db"), } } + Err(_err) => { + tracing::debug!("Invalid message format {:?}", _err) + } } - Err(_err) => { - tracing::debug!("Invalid message format {:?}", _err) + + if let Err(ack_err) = delivery.ack(BasicAckOptions::default()).await { + eprintln!("Failed to ack message: {}", ack_err); + break; // Connection likely lost, reconnect } } - - delivery.ack(BasicAckOptions::default()).await.expect("ack"); + + println!("Consumer loop ended, reconnecting in 5s..."); + sleep(Duration::from_secs(5)).await; } - - Ok(()) }) } } + +impl ListenCommand { + async fn connect_with_retry(connection_string: &str) -> Result { + let max_retries = 10; + let mut retry_delay = Duration::from_secs(1); + + for attempt in 1..=max_retries { + println!("RabbitMQ connection attempt {}/{}", attempt, max_retries); + + match MqManager::try_new(connection_string.to_string()) { + Ok(manager) => { + println!("Connected to RabbitMQ"); + return Ok(manager); + } + Err(e) => { + eprintln!("Connection attempt {} failed: {}", attempt, e); + if attempt < max_retries { + sleep(retry_delay).await; + retry_delay = std::cmp::min(retry_delay * 2, Duration::from_secs(30)); + } + } + } + } + + Err(format!("Failed to connect after {} attempts", max_retries)) + } +} diff --git a/src/db/marketplace.rs 
diff --git a/src/db/marketplace.rs b/src/db/marketplace.rs
index 5f40b283..f9e91112 100644
--- a/src/db/marketplace.rs
+++ b/src/db/marketplace.rs
@@ -1,4 +1,4 @@
-use crate::models::{StackCategory, StackTemplate, StackTemplateVersion};
+use crate::models::{StackCategory, StackTemplate, StackTemplateReview, StackTemplateVersion};
 use sqlx::PgPool;
 use tracing::Instrument;
 
@@ -26,6 +26,9 @@ pub async fn list_approved(
             t.view_count,
             t.deploy_count,
             t.required_plan_name,
+            t.price,
+            t.billing_cycle,
+            t.currency,
             t.created_at,
             t.updated_at,
             t.approved_at
@@ -107,6 +110,9 @@ pub async fn get_by_slug_and_user(
             t.view_count,
             t.deploy_count,
             t.required_plan_name,
+            t.price,
+            t.billing_cycle,
+            t.currency,
             t.created_at,
             t.updated_at,
             t.approved_at
@@ -150,6 +156,9 @@ pub async fn get_by_slug_with_latest(
             t.view_count,
             t.deploy_count,
             t.required_plan_name,
+            t.price,
+            t.billing_cycle,
+            t.currency,
             t.created_at,
             t.updated_at,
             t.approved_at
@@ -218,7 +227,10 @@ pub async fn get_by_id(
             t.created_at,
             t.updated_at,
             t.approved_at,
-            t.required_plan_name
+            t.required_plan_name,
+            t.price,
+            t.billing_cycle,
+            t.currency
         FROM stack_template t
         LEFT JOIN stack_category c ON t.category_id = c.id
         WHERE t.id = $1"#,
@@ -246,16 +258,21 @@ pub async fn create_draft(
     category_code: Option<&str>,
     tags: serde_json::Value,
     tech_stack: serde_json::Value,
+    price: f64,
+    billing_cycle: &str,
+    currency: &str,
 ) -> Result<StackTemplate, String> {
     let query_span = tracing::info_span!("marketplace_create_draft", slug = %slug);
 
+    let price_f64 = price;
+
     let rec = sqlx::query_as!(
         StackTemplate,
         r#"INSERT INTO stack_template (
             creator_user_id, creator_name, name, slug,
             short_description, long_description, category_id,
-            tags, tech_stack, status
-        ) VALUES ($1,$2,$3,$4,$5,$6,(SELECT id FROM stack_category WHERE name = $7),$8,$9,'draft')
+            tags, tech_stack, status, price, billing_cycle, currency
+        ) VALUES ($1,$2,$3,$4,$5,$6,(SELECT id FROM stack_category WHERE name = $7),$8,$9,'draft',$10,$11,$12)
         RETURNING
             id,
             creator_user_id,
@@ -273,6 +290,9 @@ pub async fn create_draft(
             view_count,
             deploy_count,
             required_plan_name,
+            price,
+            billing_cycle,
+            currency,
             created_at,
             updated_at,
             approved_at
@@ -285,7 +305,10 @@ pub async fn create_draft(
         long_description,
         category_code,
         tags,
-        tech_stack
+        tech_stack,
+        price_f64,
+        billing_cycle,
+        currency
     )
     .fetch_one(pool)
     .instrument(query_span)
@@ -370,6 +393,9 @@ pub async fn update_metadata(
     category_code: Option<&str>,
     tags: Option<serde_json::Value>,
     tech_stack: Option<serde_json::Value>,
+    price: Option<f64>,
+    billing_cycle: Option<&str>,
+    currency: Option<&str>,
 ) -> Result<bool, String> {
     let query_span =
         tracing::info_span!("marketplace_update_metadata", template_id = %template_id);
@@ -397,7 +423,10 @@ pub async fn update_metadata(
             long_description = COALESCE($4, long_description),
             category_id = COALESCE((SELECT id FROM stack_category WHERE name = $5), category_id),
             tags = COALESCE($6, tags),
-            tech_stack = COALESCE($7, tech_stack)
+            tech_stack = COALESCE($7, tech_stack),
+            price = COALESCE($8, price),
+            billing_cycle = COALESCE($9, billing_cycle),
+            currency = COALESCE($10, currency)
         WHERE id = $1::uuid"#,
         template_id,
         name,
@@ -405,7 +434,10 @@ pub async fn update_metadata(
         long_description,
         category_code,
         tags,
-        tech_stack
+        tech_stack,
+        price,
+        billing_cycle,
+        currency
     )
    .execute(pool)
    .instrument(query_span)
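Each COALESCE($n, column) in the UPDATE above means "overwrite only when the caller supplied a value"; a None argument leaves the stored value alone. The same rule in plain Rust, for intuition:

    fn coalesce<T>(new: Option<T>, current: T) -> T {
        new.unwrap_or(current)
    }

    fn main() {
        // Caller updates the price but leaves the billing cycle untouched.
        let price = coalesce(Some(9.99), 0.0);
        let billing_cycle = coalesce(None, "free".to_string());
        assert_eq!(price, 9.99);
        assert_eq!(billing_cycle, "free");
    }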
@@ -423,7 +455,7 @@ pub async fn submit_for_review(pool: &PgPool, template_id: &uuid::Uuid) -> Resul
         tracing::info_span!("marketplace_submit_for_review", template_id = %template_id);
 
     let res = sqlx::query!(
-        r#"UPDATE stack_template SET status = 'submitted' WHERE id = $1::uuid AND status IN ('draft','rejected')"#,
+        r#"UPDATE stack_template SET status = 'submitted' WHERE id = $1::uuid AND status IN ('draft','rejected','needs_changes')"#,
         template_id
     )
     .execute(pool)
@@ -437,6 +469,85 @@ pub async fn submit_for_review(pool: &PgPool, template_id: &uuid::Uuid) -> Resul
     Ok(res.rows_affected() > 0)
 }
 
+/// Resubmit a template for review with a new version.
+/// Allowed from statuses: rejected, needs_changes, approved (for version updates).
+/// Creates a new version, resets status to 'submitted'.
+pub async fn resubmit_with_new_version(
+    pool: &PgPool,
+    template_id: &uuid::Uuid,
+    version: &str,
+    stack_definition: serde_json::Value,
+    definition_format: Option<&str>,
+    changelog: Option<&str>,
+) -> Result<StackTemplateVersion, String> {
+    let query_span =
+        tracing::info_span!("marketplace_resubmit_with_new_version", template_id = %template_id);
+
+    let mut tx = pool.begin().await.map_err(|e| {
+        tracing::error!("tx begin error: {:?}", e);
+        "Internal Server Error".to_string()
+    })?;
+
+    // Update status to submitted (allowed from rejected, needs_changes, approved)
+    let res = sqlx::query!(
+        r#"UPDATE stack_template SET status = 'submitted', updated_at = now()
+           WHERE id = $1::uuid AND status IN ('rejected', 'needs_changes', 'approved')"#,
+        template_id
+    )
+    .execute(&mut *tx)
+    .instrument(query_span.clone())
+    .await
+    .map_err(|e| {
+        tracing::error!("resubmit status update error: {:?}", e);
+        "Internal Server Error".to_string()
+    })?;
+
+    if res.rows_affected() == 0 {
+        return Err("Template cannot be resubmitted from its current status".to_string());
+    }
+
+    // Clear previous latest version
+    sqlx::query!(
+        r#"UPDATE stack_template_version SET is_latest = false WHERE template_id = $1 AND is_latest = true"#,
+        template_id
+    )
+    .execute(&mut *tx)
+    .instrument(query_span.clone())
+    .await
+    .map_err(|e| {
+        tracing::error!("clear latest version error: {:?}", e);
+        "Internal Server Error".to_string()
+    })?;
+
+    // Insert new version
+    let ver = sqlx::query_as!(
+        StackTemplateVersion,
+        r#"INSERT INTO stack_template_version (
+            template_id, version, stack_definition, definition_format, changelog, is_latest
+        ) VALUES ($1,$2,$3,$4,$5,true)
+        RETURNING id, template_id, version, stack_definition, definition_format, changelog, is_latest, created_at"#,
+        template_id,
+        version,
+        stack_definition,
+        definition_format,
+        changelog
+    )
+    .fetch_one(&mut *tx)
+    .instrument(query_span)
+    .await
+    .map_err(|e| {
+        tracing::error!("insert new version error: {:?}", e);
+        "Internal Server Error".to_string()
+    })?;
+
+    tx.commit().await.map_err(|e| {
+        tracing::error!("tx commit error: {:?}", e);
+        "Internal Server Error".to_string()
+    })?;
+
+    Ok(ver)
+}
+
 pub async fn list_mine(pool: &PgPool, user_id: &str) -> Result<Vec<StackTemplate>, String> {
     let query_span = tracing::info_span!("marketplace_list_mine", user = %user_id);
 
@@ -459,6 +570,9 @@ pub async fn list_mine(pool: &PgPool, user_id: &str) -> Result<Vec<StackTemplate>, String>
             t.view_count,
             t.deploy_count,
             t.required_plan_name,
+            t.price,
+            t.billing_cycle,
+            t.currency,
             t.created_at,
             t.updated_at,
             t.approved_at
         FROM stack_template t
         LEFT JOIN stack_category c ON t.category_id = c.id
-        WHERE t.status = 'submitted'
-        ORDER BY t.created_at ASC"#
+        WHERE t.status IN ('submitted', 'approved')
+        ORDER BY
+            CASE t.status
+                WHEN 'submitted' THEN 0
+                WHEN 'approved' THEN 1
+            END,
+            t.created_at ASC"#
     )
     .fetch_all(pool)
     .instrument(query_span)
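The ORDER BY CASE above surfaces templates awaiting review before already-approved ones, oldest first within each group. An equivalent in-memory ordering, for intuition (sort_by_key is stable, so ties keep their relative order):

    fn status_rank(status: &str) -> u8 {
        match status {
            "submitted" => 0,
            "approved" => 1,
            _ => 2, // the query only selects the two statuses above
        }
    }

    fn main() {
        let mut rows = vec![("b", "approved"), ("a", "submitted"), ("c", "approved")];
        rows.sort_by_key(|r| status_rank(r.1));
        let ids: Vec<_> = rows.iter().map(|r| r.0).collect();
        assert_eq!(ids, vec!["a", "b", "c"]);
    }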
@@ -523,7 +645,7 @@ pub async fn admin_decide(
     decision: &str,
     review_reason: Option<&str>,
 ) -> Result<bool, String> {
-    let query_span = tracing::info_span!("marketplace_admin_decide", template_id = %template_id, decision = %decision);
+    let _query_span = tracing::info_span!("marketplace_admin_decide", template_id = %template_id, decision = %decision);
 
     let valid = ["approved", "rejected", "needs_changes"];
     if !valid.contains(&decision) {
@@ -579,6 +701,55 @@ pub async fn admin_decide(
     Ok(true)
 }
 
+/// Unapprove a template: set status back to 'submitted' and clear approved_at.
+/// This hides the template from the marketplace until re-approved.
+pub async fn admin_unapprove(
+    pool: &PgPool,
+    template_id: &uuid::Uuid,
+    reviewer_user_id: &str,
+    reason: Option<&str>,
+) -> Result<bool, String> {
+    let _query_span = tracing::info_span!("marketplace_admin_unapprove", template_id = %template_id);
+
+    let mut tx = pool.begin().await.map_err(|e| {
+        tracing::error!("tx begin error: {:?}", e);
+        "Internal Server Error".to_string()
+    })?;
+
+    // Insert a review record documenting the unapproval
+    sqlx::query!(
+        r#"INSERT INTO stack_template_review (template_id, reviewer_user_id, decision, review_reason, reviewed_at) VALUES ($1::uuid, $2, 'unapproved', $3, now())"#,
+        template_id,
+        reviewer_user_id,
+        reason
+    )
+    .execute(&mut *tx)
+    .await
+    .map_err(|e| {
+        tracing::error!("insert unapproval review error: {:?}", e);
+        "Internal Server Error".to_string()
+    })?;
+
+    // Set status back to 'submitted' and clear approved_at
+    let result = sqlx::query!(
+        r#"UPDATE stack_template SET status = 'submitted', approved_at = NULL WHERE id = $1::uuid AND status = 'approved'"#,
+        template_id,
+    )
+    .execute(&mut *tx)
+    .await
+    .map_err(|e| {
+        tracing::error!("unapprove template error: {:?}", e);
+        "Internal Server Error".to_string()
+    })?;
+
+    tx.commit().await.map_err(|e| {
+        tracing::error!("tx commit error: {:?}", e);
+        "Internal Server Error".to_string()
+    })?;
+
+    Ok(result.rows_affected() > 0)
+}
+
 /// Sync categories from User Service to local mirror
 /// Upserts category data (id, name, title, metadata)
 pub async fn sync_categories(
@@ -686,3 +857,84 @@ pub async fn get_categories(pool: &PgPool) -> Result<Vec<StackCategory>, String>
         "Internal Server Error".to_string()
     })
 }
+
+/// List all versions for a template, ordered by creation date descending
+pub async fn list_versions_by_template(
+    pool: &PgPool,
+    template_id: uuid::Uuid,
+) -> Result<Vec<StackTemplateVersion>, String> {
+    let query_span = tracing::info_span!("list_versions_by_template", template_id = %template_id);
+
+    sqlx::query_as::<_, StackTemplateVersion>(
+        r#"
+        SELECT id, template_id, version, stack_definition, definition_format,
+               changelog, is_latest, created_at
+        FROM stack_template_version
+        WHERE template_id = $1
+        ORDER BY created_at DESC
+        "#,
+    )
+    .bind(template_id)
+    .fetch_all(pool)
+    .instrument(query_span)
+    .await
+    .map_err(|e| {
+        tracing::error!("list_versions_by_template error: {:?}", e);
+        "Internal Server Error".to_string()
+    })
+}
+
+/// List all reviews for a template, ordered by submission date descending
+pub async fn list_reviews_by_template(
+    pool: &PgPool,
+    template_id: uuid::Uuid,
+) -> Result<Vec<StackTemplateReview>, String> {
+    let query_span = tracing::info_span!("list_reviews_by_template", template_id = %template_id);
+
+    sqlx::query_as::<_, StackTemplateReview>(
+        r#"
+        SELECT id, template_id, reviewer_user_id, decision, review_reason,
+               security_checklist, submitted_at, reviewed_at
+        FROM stack_template_review
+        WHERE template_id = $1
+        ORDER BY submitted_at DESC
+        "#,
+    )
+    .bind(template_id)
+    .fetch_all(pool)
+    .instrument(query_span)
+    .await
+    .map_err(|e| {
+        tracing::error!("list_reviews_by_template error: {:?}", e);
+        "Internal Server Error".to_string()
+    })
+}
+
+/// Save a security scan result as a review record with security_checklist populated
+pub async fn save_security_scan(
+    pool: &PgPool,
+    template_id: &uuid::Uuid,
+    reviewer_user_id: &str,
+    security_checklist: serde_json::Value,
+) -> Result<StackTemplateReview, String> {
+    let query_span = tracing::info_span!("save_security_scan", template_id = %template_id);
+
+    sqlx::query_as::<_, StackTemplateReview>(
+        r#"
+        INSERT INTO stack_template_review
+            (template_id, reviewer_user_id, decision, review_reason, security_checklist, submitted_at, reviewed_at)
+        VALUES ($1, $2, 'pending', 'Automated security scan', $3, now(), now())
+        RETURNING id, template_id, reviewer_user_id, decision, review_reason, security_checklist, submitted_at, reviewed_at
+        "#,
+    )
+    .bind(template_id)
+    .bind(reviewer_user_id)
+    .bind(&security_checklist)
+    .fetch_one(pool)
+    .instrument(query_span)
+    .await
+    .map_err(|e| {
+        tracing::error!("save_security_scan error: {:?}", e);
+        "Internal Server Error".to_string()
+    })
+}
diff --git a/src/db/project_app.rs b/src/db/project_app.rs
index d2da5011..e17e535e 100644
--- a/src/db/project_app.rs
+++ b/src/db/project_app.rs
@@ -49,6 +49,44 @@ pub async fn fetch_by_project(
     })
 }
 
+/// Fetch all apps for a specific deployment.
+/// Falls back to project-level apps if no deployment-scoped apps exist (backward compatibility).
+pub async fn fetch_by_deployment(
+    pool: &PgPool,
+    project_id: i32,
+    deployment_id: i32,
+) -> Result<Vec<models::ProjectApp>, String> {
+    let query_span = tracing::info_span!("Fetch apps by deployment id");
+    let apps = sqlx::query_as!(
+        models::ProjectApp,
+        r#"
+        SELECT * FROM project_app
+        WHERE project_id = $1 AND deployment_id = $2
+        ORDER BY deploy_order ASC NULLS LAST, id ASC
+        "#,
+        project_id,
+        deployment_id
+    )
+    .fetch_all(pool)
+    .instrument(query_span)
+    .await
+    .map_err(|e| {
+        tracing::error!("Failed to fetch apps for deployment: {:?}", e);
+        format!("Failed to fetch apps by deployment: {}", e)
+    })?;
+
+    // Backward compatibility: if no deployment-scoped apps, fall back to project-level (deployment_id IS NULL)
+    if apps.is_empty() {
+        tracing::debug!(
+            "No deployment-scoped apps for deployment_id={}, falling back to project-level apps",
+            deployment_id
+        );
+        return fetch_by_project(pool, project_id).await;
+    }
+
+    Ok(apps)
+}
+
 /// Fetch a single app by project ID and app code
 pub async fn fetch_by_project_and_code(
     pool: &PgPool,
@@ -84,9 +122,10 @@ pub async fn insert(pool: &PgPool, app: &models::ProjectApp) -> Result<models::ProjectApp, String>
     Ok(result.rows_affected() > 0)
 }
 
+/// Delete an app by project ID and app code
+pub async fn delete_by_project_and_code(
+    pool: &PgPool,
+    project_id: i32,
+    code: &str,
+) -> Result<bool, String> {
+    let query_span = tracing::info_span!("Deleting app by project and code");
+    let result = sqlx::query(
+        "DELETE FROM project_app WHERE project_id = $1 AND code = $2",
+    )
+    .bind(project_id)
+    .bind(code)
+    .execute(pool)
+    .instrument(query_span)
+    .await
+    .map_err(|e| {
+        tracing::error!("Failed to delete app by project and code: {:?}", e);
+        format!("Failed to delete app: {}", e)
+    })?;
+
+    Ok(result.rows_affected() > 0)
+}
+
 /// Delete all apps for a project
 pub async fn delete_by_project(pool: &PgPool, project_id: i32) -> Result<bool, String> {
     let query_span = tracing::info_span!("Deleting all apps for project");
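fetch_by_deployment prefers rows scoped to the deployment and silently falls back to the project-level set when none exist, so legacy rows with NULL deployment_id are still served. The decision rule, reduced to a runnable sketch:

    fn effective_apps<'a>(scoped: Vec<&'a str>, project_level: Vec<&'a str>) -> Vec<&'a str> {
        // Prefer deployment-scoped apps; empty scope means a legacy deployment.
        if scoped.is_empty() { project_level } else { scoped }
    }

    fn main() {
        // New-style deployment with its own apps.
        assert_eq!(effective_apps(vec!["web", "db"], vec!["legacy"]), vec!["web", "db"]);
        // Legacy deployment (no scoped rows) falls back to project-level apps.
        assert_eq!(effective_apps(vec![], vec!["legacy"]), vec!["legacy"]);
    }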
&PgPool, user_id: &str) -> Result Result, String> { + let query_span = tracing::info_span!("Fetch servers by user id with provider info."); + sqlx::query_as!( + models::ServerWithProvider, + r#" + SELECT + s.id, + s.user_id, + s.project_id, + s.cloud_id, + c.provider as cloud, + s.region, + s.zone, + s.server, + s.os, + s.disk_type, + s.created_at, + s.updated_at, + s.srv_ip, + s.ssh_port, + s.ssh_user, + s.vault_key_path, + s.connection_mode, + s.key_status, + s.name + FROM server s + LEFT JOIN cloud c ON s.cloud_id = c.id + WHERE s.user_id=$1 + ORDER BY s.created_at DESC + "#, + user_id + ) + .fetch_all(pool) + .instrument(query_span) + .await + .map_err(|err| { + tracing::error!("Failed to fetch servers with provider, error: {:?}", err); + "".to_string() + }) +} + pub async fn fetch_by_project( pool: &PgPool, project_id: i32, @@ -73,6 +118,7 @@ pub async fn insert(pool: &PgPool, mut server: models::Server) -> Result Result Result bool { - // Docker named volumes typically don't contain special characters or slashes - // They are alphanumeric and may include underscores or hyphens - self.host_path - .as_ref() - .unwrap() - .chars() - .all(|c| c.is_alphanumeric() || c == '_' || c == '-') + // Named volumes have no path separators and don't start with . or ~ + // Bind mounts contain '/' or start with './' or '~' + match self.host_path.as_deref() { + Some(p) if !p.is_empty() => { + let result = !p.contains('/') && !p.starts_with('.') && !p.starts_with('~'); + tracing::debug!("is_named_docker_volume: '{}' => {}", p, result); + result + } + _ => false, + } } } @@ -60,12 +63,26 @@ impl Volume { /// Convert to ComposeVolume with optional custom base directory /// If base_dir is None, uses DEFAULT_DEPLOY_DIR env var or "/home/trydirect" pub fn to_compose_volume(&self, base_dir: Option<&str>) -> dctypes::ComposeVolume { + let host_path = self.host_path.clone().unwrap_or_else(String::default); + + if self.is_named_docker_volume() { + tracing::debug!("Named volume '{}' — skipping driver_opts", host_path); + return dctypes::ComposeVolume { + driver: None, + driver_opts: Default::default(), + external: None, + labels: Default::default(), + name: Some(host_path), + }; + } + + tracing::debug!("Bind mount volume '{}' — adding driver_opts with base dir", host_path); + let default_base = std::env::var("DEFAULT_DEPLOY_DIR").unwrap_or_else(|_| "/home/trydirect".to_string()); let base = base_dir.unwrap_or(&default_base); let mut driver_opts = IndexMap::default(); - let host_path = self.host_path.clone().unwrap_or_else(String::default); driver_opts.insert( String::from("type"), @@ -76,8 +93,11 @@ impl Volume { Some(dctypes::SingleValue::String("bind".to_string())), ); - // Use configurable base directory instead of hardcoded /root/project - let path = format!("{}/{}", base.trim_end_matches('/'), &host_path); + // Normalize to avoid duplicate slashes in bind-mount device paths. 
+        let normalized_host = host_path
+            .trim_start_matches("./")
+            .trim_start_matches('/');
+        let path = format!("{}/{}", base.trim_end_matches('/'), normalized_host);
         driver_opts.insert(
             String::from("device"),
             Some(dctypes::SingleValue::String(path)),
@@ -92,3 +112,122 @@ impl Volume {
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::Volume;
+    use docker_compose_types::SingleValue;
+
+    #[test]
+    fn test_named_volume_is_not_prefixed() {
+        let volume = Volume {
+            host_path: Some("redis_data".to_string()),
+            container_path: Some("/data".to_string()),
+        };
+
+        let compose = volume.to_compose_volume(Some("/custom/base"));
+
+        assert!(compose.driver.is_none());
+        assert!(compose.driver_opts.is_empty());
+        assert_eq!(compose.name.as_deref(), Some("redis_data"));
+    }
+
+    #[test]
+    fn test_bind_volume_is_prefixed_with_base_dir() {
+        let volume = Volume {
+            host_path: Some("projects/app".to_string()),
+            container_path: Some("/var/lib/app".to_string()),
+        };
+
+        let compose = volume.to_compose_volume(Some("/srv/trydirect"));
+        let device = compose
+            .driver_opts
+            .get("device")
+            .and_then(|v| v.as_ref());
+
+        assert_eq!(compose.driver.as_deref(), Some("local"));
+        assert_eq!(compose.name.as_deref(), Some("projects/app"));
+        assert_eq!(device, Some(&SingleValue::String("/srv/trydirect/projects/app".to_string())));
+    }
+
+    #[test]
+    fn test_bind_volume_absolute_path() {
+        let volume = Volume {
+            host_path: Some("/data".to_string()),
+            container_path: Some("/var/lib/data".to_string()),
+        };
+
+        let compose = volume.to_compose_volume(Some("/srv/trydirect"));
+        let device = compose
+            .driver_opts
+            .get("device")
+            .and_then(|v| v.as_ref());
+
+        assert!(!volume.is_named_docker_volume());
+        assert_eq!(compose.driver.as_deref(), Some("local"));
+        assert_eq!(device, Some(&SingleValue::String("/srv/trydirect/data".to_string())));
+    }
+
+    #[test]
+    fn test_bind_volume_relative_path() {
+        let volume = Volume {
+            host_path: Some("./data".to_string()),
+            container_path: Some("/var/lib/data".to_string()),
+        };
+
+        let compose = volume.to_compose_volume(Some("/srv/trydirect"));
+        let device = compose
+            .driver_opts
+            .get("device")
+            .and_then(|v| v.as_ref());
+
+        assert!(!volume.is_named_docker_volume());
+        assert_eq!(compose.driver.as_deref(), Some("local"));
+        assert_eq!(device, Some(&SingleValue::String("/srv/trydirect/data".to_string())));
+    }
+
+    #[test]
+    fn test_is_named_docker_volume() {
+        let named = Volume {
+            host_path: Some("data_store-1".to_string()),
+            container_path: None,
+        };
+        let bind = Volume {
+            host_path: Some("/var/lib/app".to_string()),
+            container_path: None,
+        };
+
+        assert!(named.is_named_docker_volume());
+        assert!(!bind.is_named_docker_volume());
+    }
+
+    #[test]
+    fn test_named_volume_with_dots() {
+        // Docker allows dots in named volumes (e.g., "flowise.data")
+        let vol = Volume {
+            host_path: Some("flowise.data".to_string()),
+            container_path: Some("/data".to_string()),
+        };
+        assert!(vol.is_named_docker_volume());
+
+        let compose = vol.to_compose_volume(Some("/srv/trydirect"));
+        assert!(compose.driver.is_none());
+        assert!(compose.driver_opts.is_empty());
+        assert_eq!(compose.name.as_deref(), Some("flowise.data"));
+    }
+
+    #[test]
+    fn test_empty_host_path_is_not_named() {
+        let vol = Volume {
+            host_path: Some("".to_string()),
+            container_path: Some("/data".to_string()),
+        };
+        assert!(!vol.is_named_docker_volume());
+
+        let vol_none = Volume {
+            host_path: None,
+            container_path: Some("/data".to_string()),
+        };
+        assert!(!vol_none.is_named_docker_volume());
+    }
+}
diff --git a/src/forms/server.rs b/src/forms/server.rs
index c52d47a1..b4637cd4 100644
--- a/src/forms/server.rs
+++ b/src/forms/server.rs
@@ -5,25 +5,35 @@ use serde_valid::Validate;
 
 #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize, Validate)]
 pub struct ServerForm {
+    /// If provided, update this existing server instead of creating new
+    pub server_id: Option<i32>,
+    /// Reference to the cloud provider (DO, Hetzner, AWS, etc.)
+    pub cloud_id: Option<i32>,
     pub region: Option<String>,
     pub zone: Option<String>,
     pub server: Option<String>,
     pub os: Option<String>,
     pub disk_type: Option<String>,
     pub srv_ip: Option<String>,
+    #[serde(default = "default_ssh_port")]
     pub ssh_port: Option<i32>,
     pub ssh_user: Option<String>,
     /// Optional friendly name for the server
     pub name: Option<String>,
     /// Connection mode: "ssh" or "password" or "status_panel"
     pub connection_mode: Option<String>,
-    /// Path in Vault where SSH key is stored (e.g., "secret/data/users/{user_id}/servers/{server_id}/ssh")
+    /// Path in Vault where SSH key is stored (e.g., "secret/users/{user_id}/servers/{server_id}/ssh")
     pub vault_key_path: Option<String>,
 }
 
+pub fn default_ssh_port() -> Option<i32> {
+    Some(22)
+}
+
 impl From<&ServerForm> for models::Server {
     fn from(val: &ServerForm) -> Self {
         let mut server = models::Server::default();
+        server.cloud_id = val.cloud_id;
         server.disk_type = val.disk_type.clone();
         server.region = val.region.clone();
         server.server = val.server.clone();
@@ -32,7 +42,7 @@ impl From<&ServerForm> for models::Server {
         server.created_at = Utc::now();
         server.updated_at = Utc::now();
         server.srv_ip = val.srv_ip.clone();
-        server.ssh_port = val.ssh_port.clone();
+        server.ssh_port = val.ssh_port.clone().or_else(default_ssh_port);
         server.ssh_user = val.ssh_user.clone();
         server.name = val.name.clone();
         server.connection_mode = val
@@ -48,6 +58,7 @@ impl From<&ServerForm> for models::Server {
 impl Into<ServerForm> for models::Server {
     fn into(self) -> ServerForm {
         let mut form = ServerForm::default();
+        form.cloud_id = self.cloud_id;
         form.disk_type = self.disk_type;
         form.region = self.region;
         form.server = self.server;
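With #[serde(default = "default_ssh_port")], a form that simply omits ssh_port deserializes to Some(22); an explicit null still overrides the default, which is why the From impl also applies .or_else(default_ssh_port). A standalone sketch (struct name illustrative):

    use serde::Deserialize;

    fn default_ssh_port() -> Option<i32> {
        Some(22)
    }

    #[derive(Debug, Deserialize)]
    struct Form {
        srv_ip: Option<String>,
        #[serde(default = "default_ssh_port")]
        ssh_port: Option<i32>,
    }

    fn main() {
        // Missing key: the serde default kicks in.
        let f: Form = serde_json::from_str(r#"{"srv_ip": "10.0.0.5"}"#).unwrap();
        assert_eq!(f.ssh_port, Some(22));

        // Explicit null: deserializes to None, bypassing the default.
        let f: Form = serde_json::from_str(r#"{"srv_ip": null, "ssh_port": null}"#).unwrap();
        assert_eq!(f.ssh_port, None);
    }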
diff --git a/src/forms/status_panel.rs b/src/forms/status_panel.rs
index 177ab5e5..16b95f0d 100644
--- a/src/forms/status_panel.rs
+++ b/src/forms/status_panel.rs
@@ -36,7 +36,12 @@ fn default_create_action() -> String {
 
 #[derive(Debug, Deserialize, Serialize, Clone)]
 pub struct HealthCommandRequest {
+    /// App code to check health for. Use "all" or omit to get all containers.
+    #[serde(default = "default_health_app_code")]
     pub app_code: String,
+    /// Optional container/service name override
+    #[serde(default)]
+    pub container: Option<String>,
     #[serde(default = "default_include_metrics")]
     pub include_metrics: bool,
     /// When true and app_code is "system" or empty, return system containers (status_panel, compose-agent)
@@ -44,9 +49,16 @@ pub struct HealthCommandRequest {
     pub include_system: bool,
 }
 
+fn default_health_app_code() -> String {
+    "all".to_string()
+}
+
 #[derive(Debug, Deserialize, Serialize, Clone)]
 pub struct LogsCommandRequest {
     pub app_code: String,
+    /// Optional container/service name override
+    #[serde(default)]
+    pub container: Option<String>,
     #[serde(default)]
     pub cursor: Option<String>,
     #[serde(default = "default_log_limit")]
@@ -60,6 +72,9 @@ pub struct LogsCommandRequest {
 #[derive(Debug, Deserialize, Serialize, Clone)]
 pub struct RestartCommandRequest {
     pub app_code: String,
+    /// Optional container/service name override
+    #[serde(default)]
+    pub container: Option<String>,
     #[serde(default = "default_restart_force")]
     pub force: bool,
 }
@@ -239,7 +254,11 @@ fn ensure_result_envelope(
     if actual_hash != expected_hash {
         return Err(format!("{} result deployment_hash mismatch", expected_type));
     }
-    ensure_app_code(expected_type, app_code)
+    // Allow "all" as a special value for health checks
+    if app_code != "all" {
+        ensure_app_code(expected_type, app_code)?;
+    }
+    Ok(())
 }
 
 pub fn validate_command_parameters(
@@ -251,7 +270,10 @@ pub fn validate_command_parameters(
         let value = parameters.clone().unwrap_or_else(|| json!({}));
         let params: HealthCommandRequest = serde_json::from_value(value)
             .map_err(|err| format!("Invalid health parameters: {}", err))?;
-        ensure_app_code("health", &params.app_code)?;
+        // Allow "all" as a special value to get all containers' health
+        if params.app_code != "all" {
+            ensure_app_code("health", &params.app_code)?;
+        }
 
         serde_json::to_value(params)
             .map(Some)
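The validation change treats "all" as a wildcard that bypasses the per-app code check. A standalone approximation (the inner check is a stand-in for ensure_app_code, whose real rules are not shown in this diff):

    fn validate_health_app_code(app_code: &str) -> Result<(), String> {
        if app_code == "all" {
            return Ok(()); // special value: report every container
        }
        // Stand-in for ensure_app_code: require a plausible single-app code.
        if app_code.is_empty()
            || !app_code.chars().all(|c| c.is_alphanumeric() || c == '_' || c == '-')
        {
            return Err(format!("invalid app_code: {app_code}"));
        }
        Ok(())
    }

    fn main() {
        assert!(validate_health_app_code("all").is_ok());
        assert!(validate_health_app_code("redis").is_ok());
        assert!(validate_health_app_code("bad code!").is_err());
    }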
diff --git a/src/health/checks.rs b/src/health/checks.rs
index c5318e3d..cf38ddff 100644
--- a/src/health/checks.rs
+++ b/src/health/checks.rs
@@ -37,8 +37,23 @@ impl HealthChecker {
         let user_service_check = timeout(CHECK_TIMEOUT, self.check_user_service());
         let install_service_check = timeout(CHECK_TIMEOUT, self.check_install_service());
 
-        let (db_result, mq_result, hub_result, redis_result, vault_result, user_result, install_result) =
-            tokio::join!(db_check, mq_check, hub_check, redis_check, vault_check, user_service_check, install_service_check);
+        let (
+            db_result,
+            mq_result,
+            hub_result,
+            redis_result,
+            vault_result,
+            user_result,
+            install_result,
+        ) = tokio::join!(
+            db_check,
+            mq_check,
+            hub_check,
+            redis_check,
+            vault_check,
+            user_service_check,
+            install_service_check
+        );
 
         let db_health =
             db_result.unwrap_or_else(|_| ComponentHealth::unhealthy("Timeout".to_string()));
@@ -379,8 +394,14 @@ impl HealthChecker {
         }
 
         let mut details = HashMap::new();
-        details.insert("url".to_string(), serde_json::Value::String(user_service_url.clone()));
-        details.insert("response_time_ms".to_string(), serde_json::Value::from(elapsed));
+        details.insert(
+            "url".to_string(),
+            serde_json::Value::String(user_service_url.clone()),
+        );
+        details.insert(
+            "response_time_ms".to_string(),
+            serde_json::Value::from(elapsed),
+        );
 
         health.with_details(details)
     }
@@ -430,8 +451,14 @@ impl HealthChecker {
         }
 
         let mut details = HashMap::new();
-        details.insert("url".to_string(), serde_json::Value::String(install_url.to_string()));
-        details.insert("response_time_ms".to_string(), serde_json::Value::from(elapsed));
+        details.insert(
+            "url".to_string(),
+            serde_json::Value::String(install_url.to_string()),
+        );
+        details.insert(
+            "response_time_ms".to_string(),
+            serde_json::Value::from(elapsed),
+        );
 
         health.with_details(details)
     }
@@ -452,4 +479,4 @@ impl HealthChecker {
         }
     }
 }
-}
\ No newline at end of file
+}
diff --git a/src/helpers/cloud/security.rs b/src/helpers/cloud/security.rs
index 81a36c77..0f0b4122 100644
--- a/src/helpers/cloud/security.rs
+++ b/src/helpers/cloud/security.rs
@@ -50,12 +50,12 @@ impl Secret {
         let key: &Key<Aes256Gcm> = Key::<Aes256Gcm>::from_slice(sec_key.as_bytes());
         let cipher = Aes256Gcm::new(key);
         let nonce = Aes256Gcm::generate_nonce(&mut OsRng); // 96-bits; unique per message
-        // eprintln!("Nonce bytes {nonce:?}");
-        // let nonce_b64: String = general_purpose::STANDARD.encode(nonce);
-        // eprintln!("Nonce b64 {nonce_b64:?}");
-        // Avoid logging the plaintext token to prevent leaking sensitive data.
-        // eprintln!("token {token:?}");
-        // Avoid logging the plaintext token to prevent leaking sensitive data.
+        // eprintln!("Nonce bytes {nonce:?}");
+        // let nonce_b64: String = general_purpose::STANDARD.encode(nonce);
+        // eprintln!("Nonce b64 {nonce_b64:?}");
+        // Avoid logging the plaintext token to prevent leaking sensitive data.
+        // eprintln!("token {token:?}");
+        // Avoid logging the plaintext token to prevent leaking sensitive data.
 
         let ciphertext = cipher
             .encrypt(&nonce, token.as_ref())
@@ -119,4 +119,4 @@ impl Secret {
 
         String::from_utf8(plaintext).map_err(|e| format!("UTF-8 conversion failed: {:?}", e))
     }
-}
\ No newline at end of file
+}
diff --git a/src/helpers/json.rs b/src/helpers/json.rs
index 004df7b2..3f2eafde 100644
--- a/src/helpers/json.rs
+++ b/src/helpers/json.rs
@@ -16,10 +16,10 @@ pub(crate) struct JsonResponse<T> {
     pub(crate) meta: Option<serde_json::Value>,
 }
 
-#[derive(Serialize, Default)]
+#[derive(Serialize)]
 pub struct JsonResponseBuilder<T>
 where
-    T: serde::Serialize + Default,
+    T: serde::Serialize,
 {
     message: String,
     id: Option<i32>,
     item: Option<T>,
     list: Option<Vec<T>>,
     meta: Option<serde_json::Value>,
 }
 
 impl<T> JsonResponseBuilder<T>
 where
@@ -30,7 +30,7 @@ impl<T> JsonResponseBuilder<T>
 where
-    T: serde::Serialize + Default,
+    T: serde::Serialize,
 {
     pub(crate) fn set_msg<I: Into<String>>(mut self, msg: I) -> Self {
         self.message = msg.into();
@@ -111,10 +111,16 @@ where
 impl<T> JsonResponse<T>
 where
-    T: serde::Serialize + Default,
+    T: serde::Serialize,
 {
     pub fn build() -> JsonResponseBuilder<T> {
-        JsonResponseBuilder::default()
+        JsonResponseBuilder {
+            message: String::new(),
+            id: None,
+            item: None,
+            list: None,
+            meta: None,
+        }
     }
 }
diff --git a/src/helpers/mod.rs b/src/helpers/mod.rs
index 0c338156..d29821d0 100644
--- a/src/helpers/mod.rs
+++ b/src/helpers/mod.rs
@@ -4,12 +4,15 @@ pub mod db_pools;
 pub(crate) mod json;
 pub mod mq_manager;
 pub mod project;
+pub mod security_validator;
+pub mod ssh_client;
 pub mod vault;
 
 pub use agent_client::*;
 pub use db_pools::*;
 pub use json::*;
 pub use mq_manager::*;
+pub use ssh_client::*;
 pub use vault::*;
 pub(crate) mod cloud;
 pub(crate) mod compressor;
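The Key type in Secret::encrypt above is generic over the cipher, hence Key::<Aes256Gcm>::from_slice. A minimal encrypt/decrypt round trip in the style of the aes-gcm 0.10 API (the all-zero key is for illustration only; a real key must be 32 secret bytes):

    use aes_gcm::aead::{Aead, AeadCore, KeyInit, OsRng};
    use aes_gcm::{Aes256Gcm, Key};

    fn main() {
        let key_bytes = [0u8; 32];
        let key: &Key<Aes256Gcm> = Key::<Aes256Gcm>::from_slice(&key_bytes);
        let cipher = Aes256Gcm::new(key);
        let nonce = Aes256Gcm::generate_nonce(&mut OsRng); // 96-bit, unique per message
        let ciphertext = cipher.encrypt(&nonce, b"token".as_ref()).expect("encrypt");
        let plaintext = cipher.decrypt(&nonce, ciphertext.as_ref()).expect("decrypt");
        assert_eq!(plaintext, b"token");
    }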
diff --git a/src/helpers/mod.rs b/src/helpers/mod.rs
index 0c338156..d29821d0 100644
--- a/src/helpers/mod.rs
+++ b/src/helpers/mod.rs
@@ -4,12 +4,15 @@ pub mod db_pools;
 pub(crate) mod json;
 pub mod mq_manager;
 pub mod project;
+pub mod security_validator;
+pub mod ssh_client;
 pub mod vault;
 
 pub use agent_client::*;
 pub use db_pools::*;
 pub use json::*;
 pub use mq_manager::*;
+pub use ssh_client::*;
 pub use vault::*;
 pub(crate) mod cloud;
 pub(crate) mod compressor;
diff --git a/src/helpers/security_validator.rs b/src/helpers/security_validator.rs
new file mode 100644
index 00000000..668b07c8
--- /dev/null
+++ b/src/helpers/security_validator.rs
@@ -0,0 +1,477 @@
+use regex::Regex;
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+
+/// Result of a single security check
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SecurityCheckResult {
+    pub passed: bool,
+    pub severity: String, // "critical", "warning", "info"
+    pub message: String,
+    pub details: Vec<String>,
+}
+
+/// Full security scan report
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SecurityReport {
+    pub no_secrets: SecurityCheckResult,
+    pub no_hardcoded_creds: SecurityCheckResult,
+    pub valid_docker_syntax: SecurityCheckResult,
+    pub no_malicious_code: SecurityCheckResult,
+    pub overall_passed: bool,
+    pub risk_score: u32, // 0-100, lower is better
+    pub recommendations: Vec<String>,
+}
+
+impl SecurityReport {
+    /// Convert to the JSONB format matching stack_template_review.security_checklist
+    pub fn to_checklist_json(&self) -> Value {
+        serde_json::json!({
+            "no_secrets": self.no_secrets.passed,
+            "no_hardcoded_creds": self.no_hardcoded_creds.passed,
+            "valid_docker_syntax": self.valid_docker_syntax.passed,
+            "no_malicious_code": self.no_malicious_code.passed,
+        })
+    }
+}
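+// Illustrative note (not part of the original patch): a report where only the
+// malicious-code check failed serializes via `to_checklist_json` to:
+//   {"no_secrets": true, "no_hardcoded_creds": true,
+//    "valid_docker_syntax": true, "no_malicious_code": false}
+// and is stored verbatim in stack_template_review.security_checklist.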
+/// Patterns that indicate hardcoded secrets in environment variables or configs
+const SECRET_PATTERNS: &[(&str, &str)] = &[
+    (r"(?i)(aws_secret_access_key|aws_access_key_id)\s*[:=]\s*[A-Za-z0-9/+=]{20,}", "AWS credentials"),
+    (r"(?i)(api[_-]?key|apikey)\s*[:=]\s*[A-Za-z0-9_\-]{16,}", "API key"),
+    (r"(?i)(secret[_-]?key|secret_token)\s*[:=]\s*[A-Za-z0-9_\-]{16,}", "Secret key/token"),
+    (r"(?i)bearer\s+[A-Za-z0-9_\-\.]{20,}", "Bearer token"),
+    (r"(?i)(ghp|gho|ghu|ghs|ghr)_[A-Za-z0-9]{36,}", "GitHub token"),
+    (r"(?i)sk-[A-Za-z0-9]{20,}", "OpenAI/Stripe secret key"),
+    (r"(?i)(-----BEGIN\s+(RSA\s+)?PRIVATE\s+KEY-----)", "Private key"),
+    (r"(?i)AKIA[0-9A-Z]{16}", "AWS Access Key ID"),
+    (r"(?i)(slack[_-]?token|xox[bpas]-)", "Slack token"),
+    (r"(?i)(database_url|db_url)\s*[:=]\s*\S*:[^${\s]{8,}", "Database URL with credentials"),
+];
+
+/// Patterns for hardcoded credentials (passwords, default creds).
+/// NOTE: the `regex` crate does not support lookahead, so env-var references
+/// and well-known placeholder values are filtered out in code below instead.
+const CRED_PATTERNS: &[(&str, &str)] = &[
+    (r#"(?i)(password|passwd|pwd)\s*[:=]\s*['"]?[A-Za-z0-9!@#$%^&*]{6,}['"]?"#, "Hardcoded password"),
+    (r#"(?i)(mysql_root_password|postgres_password|mongo_initdb_root_password)\s*[:=]\s*['"]?[^\s'"$]{4,}"#, "Hardcoded database password"),
+    (r"(?i)root:[^\s:$]{4,}", "Root password in plain text"),
+];
+
+/// Patterns indicating potentially malicious or dangerous configurations
+const MALICIOUS_PATTERNS: &[(&str, &str, &str)] = &[
+    (r"(?i)privileged\s*:\s*true", "critical", "Container running in privileged mode"),
+    (r#"(?i)network_mode\s*:\s*['"]?host"#, "warning", "Container using host network"),
+    (r#"(?i)pid\s*:\s*['"]?host"#, "critical", "Container sharing host PID namespace"),
+    (r#"(?i)ipc\s*:\s*['"]?host"#, "critical", "Container sharing host IPC namespace"),
+    (r"(?i)cap_add\s*:.*SYS_ADMIN", "critical", "Container with SYS_ADMIN capability"),
+    (r"(?i)cap_add\s*:.*SYS_PTRACE", "warning", "Container with SYS_PTRACE capability"),
+    (r"(?i)cap_add\s*:.*ALL", "critical", "Container with ALL capabilities"),
+    (r"(?i)/var/run/docker\.sock", "critical", "Docker socket mounted (container escape risk)"),
+    (r"(?i)volumes\s*:.*:/host", "warning", "Suspicious host filesystem mount"),
+    (r"(?i)volumes\s*:.*:/etc(/|\s|$)", "warning", "Host /etc directory mounted"),
+    (r"(?i)volumes\s*:.*:/root", "critical", "Host /root directory mounted"),
+    (r"(?i)volumes\s*:.*:/proc", "critical", "Host /proc directory mounted"),
+    (r"(?i)volumes\s*:.*:/sys", "critical", "Host /sys directory mounted"),
+    (r"(?i)curl\s+.*\|\s*(sh|bash)", "warning", "Remote script execution via curl pipe"),
+    (r"(?i)wget\s+.*\|\s*(sh|bash)", "warning", "Remote script execution via wget pipe"),
+];
+
+/// Known suspicious Docker images
+const SUSPICIOUS_IMAGES: &[&str] = &[
+    "alpine:latest", // not suspicious per se, but discouraged for reproducibility
+];
+
+const KNOWN_CRYPTO_MINER_PATTERNS: &[&str] = &[
+    "xmrig", "cpuminer", "cryptonight", "stratum+tcp", "minerd", "hashrate",
+    "monero", "coinhive", "coin-hive",
+];
+
+/// Normalize a JSON-pretty-printed string into a YAML-like format so that
+/// regex patterns designed for docker-compose YAML also match JSON input.
+///
+/// Transforms lines like:
+///   `"AWS_SECRET_ACCESS_KEY": "wJalrXU..."` → `AWS_SECRET_ACCESS_KEY: wJalrXU...`
+///   `"privileged": true` → `privileged: true`
+fn normalize_json_for_matching(json: &str) -> String {
+    // Match JSON key-value pairs: "key": "value" or "key": non-string
+    let re = Regex::new(r#""([^"]+)"\s*:\s*"([^"]*)""#).unwrap();
+    let pass1 = re.replace_all(json, "$1: $2");
+    // Handle "key": true / false / number (non-string values)
+    let re2 = Regex::new(r#""([^"]+)"\s*:\s*([^",\}\]]+)"#).unwrap();
+    re2.replace_all(&pass1, "$1: $2").to_string()
+}
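+// Illustrative example (not part of the original patch): how JSON input is
+// normalized so the YAML-oriented patterns above can match it.
+#[cfg(test)]
+mod normalize_example {
+    use super::normalize_json_for_matching;
+
+    #[test]
+    fn json_input_matches_yaml_patterns_after_normalization() {
+        let raw = r#"{"privileged": true, "image": "nginx:1.25"}"#;
+        let normalized = normalize_json_for_matching(raw);
+        // `"privileged": true` becomes `privileged: true`, which the
+        // MALICIOUS_PATTERNS regex for privileged mode can then match.
+        assert!(normalized.contains("privileged: true"));
+        assert!(normalized.contains("image: nginx:1.25"));
+    }
+}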
+/// Run all security checks on a stack definition
+pub fn validate_stack_security(stack_definition: &Value) -> SecurityReport {
+    // Convert the stack definition to a string for pattern matching.
+    // When the input is a JSON object, serde_json produces `"key": "value"` format
+    // which breaks YAML-oriented regex patterns. We normalize by stripping JSON
+    // key/value quotes so patterns like `key\s*:\s*value` match both formats.
+    let definition_str = match stack_definition {
+        Value::String(s) => s.clone(),
+        _ => {
+            let json = serde_json::to_string_pretty(stack_definition).unwrap_or_default();
+            normalize_json_for_matching(&json)
+        }
+    };
+
+    let no_secrets = check_no_secrets(&definition_str);
+    let no_hardcoded_creds = check_no_hardcoded_creds(&definition_str);
+    let valid_docker_syntax = check_valid_docker_syntax(stack_definition, &definition_str);
+    let no_malicious_code = check_no_malicious_code(&definition_str);
+
+    let overall_passed = no_secrets.passed
+        && no_hardcoded_creds.passed
+        && valid_docker_syntax.passed
+        && no_malicious_code.passed;
+
+    // Calculate risk score (0-100)
+    let mut risk_score: u32 = 0;
+    if !no_secrets.passed {
+        risk_score += 40;
+    }
+    if !no_hardcoded_creds.passed {
+        risk_score += 25;
+    }
+    if !valid_docker_syntax.passed {
+        risk_score += 10;
+    }
+    if !no_malicious_code.passed {
+        risk_score += 25;
+    }
+
+    // Additional risk from severity of findings
+    let critical_count = no_malicious_code
+        .details
+        .iter()
+        .filter(|d| d.contains("[CRITICAL]"))
+        .count();
+    risk_score = (risk_score + (critical_count as u32 * 5)).min(100);
+
+    let mut recommendations = Vec::new();
+    if !no_secrets.passed {
+        recommendations.push("Replace hardcoded secrets with environment variable references (e.g., ${SECRET_KEY})".to_string());
+    }
+    if !no_hardcoded_creds.passed {
+        recommendations.push("Use Docker secrets or environment variable references for passwords".to_string());
+    }
+    if !valid_docker_syntax.passed {
+        recommendations.push("Fix Docker Compose syntax issues to ensure deployability".to_string());
+    }
+    if !no_malicious_code.passed {
+        recommendations.push("Review and remove dangerous container configurations (privileged mode, host mounts)".to_string());
+    }
+    if risk_score == 0 {
+        recommendations.push("Automated scan passed. AI review recommended for deeper analysis.".to_string());
+    }
+
+    SecurityReport {
+        no_secrets,
+        no_hardcoded_creds,
+        valid_docker_syntax,
+        no_malicious_code,
+        overall_passed,
+        risk_score,
+        recommendations,
+    }
+}
+
+fn check_no_secrets(content: &str) -> SecurityCheckResult {
+    let mut findings = Vec::new();
+
+    for (pattern, description) in SECRET_PATTERNS {
+        if let Ok(re) = Regex::new(pattern) {
+            for mat in re.find_iter(content) {
+                let context = &content[mat.start()..mat.end().min(mat.start() + 60)];
+                // Mask the actual value
+                let masked = if context.len() > 20 {
+                    format!("{}...***", &context[..20])
+                } else {
+                    "***masked***".to_string()
+                };
+                findings.push(format!("[CRITICAL] {}: {}", description, masked));
+            }
+        }
+    }
+
+    SecurityCheckResult {
+        passed: findings.is_empty(),
+        severity: if findings.is_empty() {
+            "info".to_string()
+        } else {
+            "critical".to_string()
+        },
+        message: if findings.is_empty() {
+            "No exposed secrets detected".to_string()
+        } else {
+            format!("Found {} potential secret(s) in stack definition", findings.len())
+        },
+        details: findings,
+    }
+}
+
+fn check_no_hardcoded_creds(content: &str) -> SecurityCheckResult {
+    let mut findings = Vec::new();
+
+    for (pattern, description) in CRED_PATTERNS {
+        if let Ok(re) = Regex::new(pattern) {
+            for mat in re.find_iter(content) {
+                // Skip env-var references and well-known placeholder values
+                // (the lookahead the original patterns implied is done here,
+                // since the `regex` crate has no lookahead support).
+                let matched = content[mat.start()..mat.end()].to_lowercase();
+                if matched.contains("${")
+                    || matched.contains("$(")
+                    || matched.contains("changeme")
+                    || matched.contains("change_me")
+                    || matched.contains("your_password")
+                    || matched.contains("example")
+                {
+                    continue;
+                }
+                let line = content[..mat.start()]
+                    .lines()
+                    .count()
+                    + 1;
+                findings.push(format!("[WARNING] {} near line {}", description, line));
+            }
+        }
+    }
+
+    // Check for common default credentials
+    let default_creds = [
+        ("admin:admin", "Default admin:admin credentials"),
+        ("root:root", "Default root:root credentials"),
+        ("admin:password", "Default admin:password credentials"),
+        ("user:password", "Default user:password credentials"),
+    ];
+
+    for (cred, desc) in default_creds {
+        if content.to_lowercase().contains(cred) {
+            findings.push(format!("[WARNING] {}", desc));
+        }
+    }
+
+    SecurityCheckResult {
+        passed: findings.is_empty(),
+        severity: if findings.is_empty() {
+            "info".to_string()
+        } else {
+            "warning".to_string()
+        },
+        message: if findings.is_empty() {
+            "No hardcoded credentials detected".to_string()
+        } else {
+            format!(
+                "Found {} potential hardcoded credential(s)",
+                findings.len()
+            )
+        },
+        details: findings,
+    }
+}
+
+fn check_valid_docker_syntax(stack_definition: &Value, raw_content: &str) -> SecurityCheckResult {
+    let mut findings = Vec::new();
+
+    // Check if it looks like valid docker-compose structure
+    let has_services = stack_definition.get("services").is_some()
+        || raw_content.contains("services:");
+
+    if !has_services {
+        findings.push("[WARNING] Missing 'services' key — may not be valid Docker Compose".to_string());
+    }
+
+    // Check for 'version' key (optional in modern compose but common)
+    let has_version = stack_definition.get("version").is_some()
+        || raw_content.contains("version:");
+
+    // Check that services have images or build contexts
+    if let Some(services) = stack_definition.get("services") {
+        if let Some(services_map) = services.as_object() {
+            for (name, service) in services_map {
+                let has_image = service.get("image").is_some();
+                let has_build = service.get("build").is_some();
+                if !has_image && !has_build {
+                    findings.push(format!(
+                        "[WARNING] Service '{}' has neither 'image' nor 'build' defined",
+                        name
+                    ));
+                }
+
+                // Check for image tags — warn on :latest
+                if let Some(image) = service.get("image").and_then(|v| v.as_str()) {
+                    if image.ends_with(":latest") || !image.contains(':') {
+                        findings.push(format!(
+                            "[INFO] Service '{}' uses unpinned image tag '{}' — consider pinning a specific version",
+                            name, image
+                        ));
+                    }
+                }
+            }
+
+            if services_map.is_empty() {
+                findings.push("[WARNING] Services section is empty".to_string());
+            }
+        }
+    }
+
+    let errors_only: Vec<&String> = findings.iter().filter(|f| f.contains("[WARNING]")).collect();
+
+    SecurityCheckResult {
+        passed: errors_only.is_empty(),
+        severity: if errors_only.is_empty() {
+            "info".to_string()
+        } else {
+            "warning".to_string()
+        },
+        message: if errors_only.is_empty() {
+            if has_version {
+                "Docker Compose syntax looks valid".to_string()
+            } else {
+                "Docker Compose syntax acceptable (no version key, modern format)".to_string()
+            }
+        } else {
+            format!("Found {} Docker Compose syntax issue(s)", errors_only.len())
+        },
+        details: findings,
+    }
+}
+
+fn check_no_malicious_code(content: &str) -> SecurityCheckResult {
+    let mut findings = Vec::new();
+
+    // Check for dangerous Docker configurations
+    for (pattern, severity, description) in MALICIOUS_PATTERNS {
+        if let Ok(re) = Regex::new(pattern) {
+            if re.is_match(content) {
+                findings.push(format!("[{}] {}", severity.to_uppercase(), description));
+            }
+        }
+    }
+
+    // Check for crypto miner patterns
+    let content_lower = content.to_lowercase();
+    for miner_pattern in KNOWN_CRYPTO_MINER_PATTERNS {
+        if content_lower.contains(miner_pattern) {
+            findings.push(format!(
+                "[CRITICAL] Potential crypto miner reference detected: '{}'",
+                miner_pattern
+            ));
+        }
+    }
+
+    // Check for suspicious base64 encoded content (long base64 strings could hide payloads)
+    if let Ok(re) = Regex::new(r"[A-Za-z0-9+/]{100,}={0,2}") {
+        if re.is_match(content) {
+            findings.push("[WARNING] Long base64-encoded content detected — may contain hidden payload".to_string());
+        }
+    }
+
+    // Check for outbound network calls in entrypoints/commands
+    if let Ok(re) = Regex::new(r"(?i)(curl|wget|nc|ncat)\s+.*(http|ftp|tcp)") {
+        if re.is_match(content) {
+            findings.push("[INFO] Outbound network call detected in command/entrypoint — review if expected".to_string());
+        }
+    }
+
+    let critical_or_warning: Vec<&String> = findings
+        .iter()
+        .filter(|f| f.contains("[CRITICAL]") || f.contains("[WARNING]"))
+        .collect();
+
+    SecurityCheckResult {
+        passed: critical_or_warning.is_empty(),
+        severity: if findings.iter().any(|f| f.contains("[CRITICAL]")) {
+            "critical".to_string()
+        } else if findings.iter().any(|f| f.contains("[WARNING]")) {
+            "warning".to_string()
+        } else {
+            "info".to_string()
+        },
+        message: if critical_or_warning.is_empty() {
+            "No malicious patterns detected".to_string()
+        } else {
+            format!(
+                "Found {} potentially dangerous configuration(s)",
+                critical_or_warning.len()
+            )
+        },
+        details: findings,
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use serde_json::json;
+
+    #[test]
+    fn test_clean_definition_passes() {
+        let definition = json!({
+            "version": "3.8",
+            "services": {
+                "web": {
+                    "image": "nginx:1.25",
+                    "ports": ["80:80"]
+                },
+                "db": {
+                    "image": "postgres:16",
+                    "environment": {
+                        "POSTGRES_PASSWORD": "${DB_PASSWORD}"
+                    }
+                }
+            }
+        });
+
+        let report = validate_stack_security(&definition);
+        assert!(report.overall_passed);
+        assert_eq!(report.risk_score, 0);
+    }
+
+    #[test]
+    fn test_hardcoded_secret_detected() {
+        let definition = json!({
+            "services": {
+                "app": {
+                    "image": "myapp:1.0",
+                    "environment": {
+                        "AWS_SECRET_ACCESS_KEY": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+                    }
+                }
+            }
+        });
+
+        let report = validate_stack_security(&definition);
+        assert!(!report.no_secrets.passed);
+        assert!(report.risk_score >= 40);
+    }
+
+    #[test]
+    fn test_privileged_mode_detected() {
+        let definition = json!({
+            "services": {
+                "app": {
+                    "image": "myapp:1.0",
+                    "privileged": true
+                }
+            }
+        });
+
+        let report = validate_stack_security(&definition);
+        assert!(!report.no_malicious_code.passed);
+    }
+
+    #[test]
+    fn test_docker_socket_mount_detected() {
+        let definition = json!({
+            "services": {
+                "app": {
+                    "image": "myapp:1.0",
+                    "volumes": ["/var/run/docker.sock:/var/run/docker.sock"]
+                }
+            }
+        });
+
+        let report = validate_stack_security(&definition);
+        assert!(!report.no_malicious_code.passed);
+    }
+
+    #[test]
+    fn test_missing_services_key() {
+        let definition = json!({
+            "app": {
+                "image": "nginx:1.25"
+            }
+        });
+
+        let report = validate_stack_security(&definition);
+        assert!(!report.valid_docker_syntax.passed);
+    }
+}
diff --git a/src/helpers/ssh_client.rs b/src/helpers/ssh_client.rs
new file mode 100644
index 00000000..11616ca2
--- /dev/null
+++ b/src/helpers/ssh_client.rs
@@ -0,0 +1,443 @@
+//! SSH client for remote server validation
+//!
+//! Uses russh to connect to servers and execute system check commands.
+
+use async_trait::async_trait;
+use russh::client::{Config, Handle};
+use russh::keys::key::KeyPair;
+use russh::Preferred;
+use serde::{Deserialize, Serialize};
+use std::sync::Arc;
+use std::time::Duration;
+use tokio::time::timeout;
+
+/// Result of a full system check via SSH
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SystemCheckResult {
+    /// SSH connection was successful
+    pub connected: bool,
+    /// SSH authentication was successful
+    pub authenticated: bool,
+    /// Username from whoami
+    pub username: Option<String>,
+    /// Total disk space in GB
+    pub disk_total_gb: Option<f64>,
+    /// Available disk space in GB
+    pub disk_available_gb: Option<f64>,
+    /// Disk usage percentage
+    pub disk_usage_percent: Option<f64>,
+    /// Docker is installed
+    pub docker_installed: bool,
+    /// Docker version string
+    pub docker_version: Option<String>,
+    /// OS name (from /etc/os-release)
+    pub os_name: Option<String>,
+    /// OS version
+    pub os_version: Option<String>,
+    /// Total memory in MB
+    pub memory_total_mb: Option<u64>,
+    /// Available memory in MB
+    pub memory_available_mb: Option<u64>,
+    /// Error message if validation failed
+    pub error: Option<String>,
+}
+
+impl Default for SystemCheckResult {
+    fn default() -> Self {
+        Self {
+            connected: false,
+            authenticated: false,
+            username: None,
+            disk_total_gb: None,
+            disk_available_gb: None,
+            disk_usage_percent: None,
+            docker_installed: false,
+            docker_version: None,
+            os_name: None,
+            os_version: None,
+            memory_total_mb: None,
+            memory_available_mb: None,
+            error: None,
+        }
+    }
+}
+
+impl SystemCheckResult {
+    /// Check if the system meets minimum requirements
+    pub fn meets_requirements(&self) -> bool {
+        self.connected
+            && self.authenticated
+            && self.docker_installed
+            && self.disk_available_gb.map_or(false, |gb| gb >= 5.0)
+    }
+
+    /// Generate a human-readable summary
+    pub fn summary(&self) -> String {
+        if !self.connected {
+            return "Connection failed".to_string();
+        }
+        if !self.authenticated {
+            return "Authentication failed".to_string();
+        }
+
+        let mut parts = vec![];
+
+        if let Some(os) = &self.os_name {
+            if let Some(ver) = &self.os_version {
+                parts.push(format!("{} {}", os, ver));
+            } else {
+                parts.push(os.clone());
+            }
+        }
+
+        if let Some(disk) = self.disk_available_gb {
+            parts.push(format!("{:.1}GB available", disk));
+        }
+
+        if self.docker_installed {
+            if let Some(ver) = &self.docker_version {
+                parts.push(format!("Docker {}", ver));
+            } else {
+                parts.push("Docker installed".to_string());
+            }
+        } else {
+            parts.push("Docker NOT installed".to_string());
+        }
+
+        if parts.is_empty() {
+            "Connected".to_string()
+        } else {
+            parts.join(", ")
+        }
+    }
+}
+
+/// SSH client handler for russh
+struct ClientHandler;
+
+#[async_trait]
+impl russh::client::Handler for ClientHandler {
+    type Error = russh::Error;
+
+    async fn check_server_key(
+        &mut self,
+        _server_public_key: &russh::keys::key::PublicKey,
+    ) -> Result<bool, Self::Error> {
+        // Accept all host keys for server validation
+        // In production, consider implementing host key verification
+        Ok(true)
+    }
+}
+
+/// Perform a full system check via SSH
+///
+/// Connects to the server, authenticates with the provided private key,
+/// and runs diagnostic commands to gather system information.
+pub async fn check_server(
+    host: &str,
+    port: u16,
+    username: &str,
+    private_key_pem: &str,
+    connection_timeout: Duration,
+) -> SystemCheckResult {
+    let mut result = SystemCheckResult::default();
+
+    // Parse the private key
+    let key = match parse_private_key(private_key_pem) {
+        Ok(k) => k,
+        Err(e) => {
+            tracing::error!("Failed to parse SSH private key: {}", e);
+            result.error = Some(format!("Invalid SSH key: {}", e));
+            return result;
+        }
+    };
+
+    // Build SSH config
+    let config = Arc::new(Config {
+        preferred: Preferred::DEFAULT,
+        ..Default::default()
+    });
+
+    // Connect with timeout
+    let addr = format!("{}:{}", host, port);
+    tracing::info!("Connecting to {} as {}", addr, username);
+
+    let connection_result =
+        timeout(connection_timeout, connect_and_auth(config, &addr, username, key)).await;
+
+    match connection_result {
+        Ok(Ok(handle)) => {
+            result.connected = true;
+            result.authenticated = true;
+            tracing::info!("SSH connection established successfully");
+
+            // Run system checks
+            run_system_checks(&mut result, handle).await;
+        }
+        Ok(Err(e)) => {
+            tracing::warn!("SSH connection/auth failed: {}", e);
+            let error_str = e.to_string().to_lowercase();
+            if error_str.contains("auth") || error_str.contains("key") || error_str.contains("permission") {
+                result.connected = true;
+                result.error = Some(format!("Authentication failed: {}", e));
+            } else {
+                result.error = Some(format!("Connection failed: {}", e));
+            }
+        }
+        Err(_) => {
+            tracing::warn!("SSH connection timed out after {:?}", connection_timeout);
+            result.error = Some(format!(
+                "Connection timed out after {} seconds",
+                connection_timeout.as_secs()
+            ));
+        }
+    }
+
+    result
+}
+
+/// Parse a PEM-encoded private key (OpenSSH or traditional formats)
+fn parse_private_key(pem: &str) -> Result<KeyPair, russh::keys::Error> {
+    // russh-keys supports various formats including OpenSSH and traditional PEM
+    let key = russh::keys::decode_secret_key(pem, None)?;
+    Ok(key)
+}
+
+/// Connect and authenticate to the SSH server
+async fn connect_and_auth(
+    config: Arc<Config>,
+    addr: &str,
+    username: &str,
+    key: KeyPair,
+) -> Result<Handle<ClientHandler>, anyhow::Error> {
+    let handler = ClientHandler;
+    let mut handle = russh::client::connect(config, addr, handler).await?;
+
+    // Authenticate with public key
+    let authenticated = handle.authenticate_publickey(username, Arc::new(key)).await?;
+
+    if !authenticated {
+        return Err(anyhow::anyhow!("Public key authentication failed"));
+    }
+
+    Ok(handle)
+}
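+/// Illustrative usage sketch (not part of the original patch): validate a
+/// server before deployment. Host, user, and timeout are placeholder values.
+#[allow(dead_code)]
+async fn example_validate_server(private_key_pem: &str) {
+    let result = check_server(
+        "203.0.113.10",
+        22,
+        "root",
+        private_key_pem,
+        Duration::from_secs(15),
+    )
+    .await;
+
+    if result.meets_requirements() {
+        tracing::info!("Server ready for deployment: {}", result.summary());
+    } else {
+        tracing::warn!("Server validation failed: {}", result.summary());
+    }
+}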
+/// Run system check commands and populate the result
+async fn run_system_checks(result: &mut SystemCheckResult, handle: Handle<ClientHandler>) {
+    // Check username
+    if let Ok(output) = exec_command(&handle, "whoami").await {
+        result.username = Some(output.trim().to_string());
+    }
+
+    // Check disk space (df -BG /)
+    if let Ok(output) = exec_command(&handle, "df -BG / 2>/dev/null | tail -1").await {
+        parse_disk_info(result, &output);
+    }
+
+    // Check Docker
+    match exec_command(&handle, "docker --version 2>/dev/null").await {
+        Ok(output) if !output.is_empty() && !output.contains("not found") => {
+            result.docker_installed = true;
+            // Extract version number (e.g., "Docker version 24.0.5, build ced0996")
+            if let Some(version) = output
+                .strip_prefix("Docker version ")
+                .and_then(|s| s.split(',').next())
+            {
+                result.docker_version = Some(version.trim().to_string());
+            }
+        }
+        _ => {
+            result.docker_installed = false;
+        }
+    }
+
+    // Check OS info
+    if let Ok(output) = exec_command(&handle, "cat /etc/os-release 2>/dev/null").await {
+        parse_os_info(result, &output);
+    }
+
+    // Check memory (free -m)
+    if let Ok(output) = exec_command(&handle, "free -m 2>/dev/null | grep -i mem").await {
+        parse_memory_info(result, &output);
+    }
+}
+
+/// Execute a command on the remote server and return stdout
+async fn exec_command(
+    handle: &Handle<ClientHandler>,
+    command: &str,
+) -> Result<String, russh::Error> {
+    let mut channel = handle.channel_open_session().await?;
+    channel.exec(true, command).await?;
+
+    let mut output = Vec::new();
+    let timeout_duration = Duration::from_secs(10);
+
+    let read_result = timeout(timeout_duration, async {
+        loop {
+            match channel.wait().await {
+                Some(russh::ChannelMsg::Data { data }) => {
+                    output.extend_from_slice(&data);
+                }
+                Some(russh::ChannelMsg::ExtendedData { data, ext: _ }) => {
+                    // stderr - ignore for now
+                    let _ = data;
+                }
+                Some(russh::ChannelMsg::Eof) => break,
+                Some(russh::ChannelMsg::ExitStatus { exit_status: _ }) => {}
+                Some(russh::ChannelMsg::Close) => break,
+                None => break,
+                _ => {}
+            }
+        }
+    })
+    .await;
+
+    if read_result.is_err() {
+        tracing::warn!("Command '{}' timed out", command);
+    }
+
+    // Close the channel
+    let _ = channel.eof().await;
+    let _ = channel.close().await;
+
+    Ok(String::from_utf8_lossy(&output).to_string())
+}
+
+/// Parse disk info from df output
+fn parse_disk_info(result: &mut SystemCheckResult, output: &str) {
+    // df -BG output: "Filesystem 1G-blocks Used Available Use% Mounted on"
+    // Example line: "/dev/sda1 50G 20G 28G 42% /"
+    let parts: Vec<&str> = output.split_whitespace().collect();
+    if parts.len() >= 4 {
+        // Parse total (index 1)
+        if let Some(total) = parts.get(1).and_then(|s| s.trim_end_matches('G').parse::<f64>().ok())
+        {
+            result.disk_total_gb = Some(total);
+        }
+
+        // Parse available (index 3)
+        if let Some(avail) = parts.get(3).and_then(|s| s.trim_end_matches('G').parse::<f64>().ok())
+        {
+            result.disk_available_gb = Some(avail);
+        }
+
+        // Parse usage percentage (index 4)
+        if let Some(usage) = parts.get(4).and_then(|s| s.trim_end_matches('%').parse::<f64>().ok())
+        {
+            result.disk_usage_percent = Some(usage);
+        }
+    }
+}
+
+/// Parse OS info from /etc/os-release
+fn parse_os_info(result: &mut SystemCheckResult, output: &str) {
+    for line in output.lines() {
+        if line.starts_with("NAME=") {
+            result.os_name = Some(
+                line.trim_start_matches("NAME=")
+                    .trim_matches('"')
+                    .to_string(),
+            );
+        } else if line.starts_with("VERSION=") {
+            result.os_version = Some(
+                line.trim_start_matches("VERSION=")
+                    .trim_matches('"')
+                    .to_string(),
+            );
+        } else if line.starts_with("VERSION_ID=") && result.os_version.is_none() {
+            result.os_version = Some(
+                line.trim_start_matches("VERSION_ID=")
+                    .trim_matches('"')
+                    .to_string(),
+            );
+        }
+    }
+}
+
+/// Parse memory info from free -m output
+fn parse_memory_info(result: &mut SystemCheckResult, output: &str) {
+    // free -m | grep Mem output: "Mem: 15883 5234 8234 123 2414 10315"
+    let parts: Vec<&str> = output.split_whitespace().collect();
+    if parts.len() >= 4 {
+        // Total memory (index 1)
+        if let Some(total) = parts.get(1).and_then(|s| s.parse::<u64>().ok()) {
+            result.memory_total_mb = Some(total);
+        }
+
+        // Available memory (index 6 in newer free, or calculate from free + buffers/cache)
+        // For simplicity, use the "free" column (index 3) + buffers/cache (index 5) if available
+        if let Some(avail) = parts.get(6).and_then(|s| s.parse::<u64>().ok()) {
+            result.memory_available_mb = Some(avail);
+        } else if let Some(free) = parts.get(3).and_then(|s| s.parse::<u64>().ok()) {
+            // Fallback to free column
+            result.memory_available_mb = Some(free);
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_parse_disk_info() {
+        let mut result = SystemCheckResult::default();
+        parse_disk_info(&mut result, "/dev/sda1 50G 20G 28G 42% /");
+
+        assert_eq!(result.disk_total_gb, Some(50.0));
+        assert_eq!(result.disk_available_gb, Some(28.0));
+        assert_eq!(result.disk_usage_percent, Some(42.0));
+    }
+
+    #[test]
+    fn test_parse_os_info() {
+        let mut result = SystemCheckResult::default();
+        let os_release = r#"NAME="Ubuntu"
+VERSION="22.04.3 LTS (Jammy Jellyfish)"
+ID=ubuntu
+VERSION_ID="22.04"
+"#;
+        parse_os_info(&mut result, os_release);
+
+        assert_eq!(result.os_name, Some("Ubuntu".to_string()));
+        assert_eq!(
+            result.os_version,
+            Some("22.04.3 LTS (Jammy Jellyfish)".to_string())
+        );
+    }
+
+    #[test]
+    fn test_parse_memory_info() {
+        let mut result = SystemCheckResult::default();
+        parse_memory_info(
+            &mut result,
+            "Mem: 15883 5234 8234 123 2414 10315",
+        );
+
+        assert_eq!(result.memory_total_mb, Some(15883));
+        assert_eq!(result.memory_available_mb, Some(10315));
+    }
+
+    #[test]
+    fn test_summary() {
+        let mut result = SystemCheckResult::default();
+        assert_eq!(result.summary(), "Connection failed");
+
+        result.connected = true;
+        assert_eq!(result.summary(), "Authentication failed");
+
+        result.authenticated = true;
+        result.os_name = Some("Ubuntu".to_string());
+        result.os_version = Some("22.04".to_string());
+        result.disk_available_gb = Some(50.0);
+        result.docker_installed = true;
+        result.docker_version = Some("24.0.5".to_string());
+
+        assert_eq!(
+            result.summary(),
+            "Ubuntu 22.04, 50.0GB available, Docker 24.0.5"
+        );
+    }
+}
diff --git a/src/helpers/vault.rs b/src/helpers/vault.rs
index 2e62eeff..d468c4a7 100644
--- a/src/helpers/vault.rs
+++ b/src/helpers/vault.rs
@@ -167,21 +167,21 @@ impl VaultClient {
 
     // ============ SSH Key Management Methods ============
 
-    /// Build the Vault path for SSH keys: {base}/v1/secret/data/users/{user_id}/ssh_keys/{server_id}
+    /// Build the Vault path for SSH keys: {base}/v1/secret/users/{user_id}/ssh_keys/{server_id}
     fn ssh_key_path(&self, user_id: &str, server_id: i32) -> String {
         let base = self.address.trim_end_matches('/');
         let api_prefix = self.api_prefix.trim_matches('/');
         let prefix = self.ssh_key_path_prefix.trim_matches('/');
-        // For KV v2, the path must include 'secret/data/'
+        // Path without 'data' segment (KV v1 or custom mount)
         if api_prefix.is_empty() {
             format!(
-                "{}/secret/data/{}/{}/ssh_keys/{}",
+                "{}/secret/{}/{}/ssh_keys/{}",
                 base, prefix, user_id, server_id
             )
         } else {
             format!(
-                "{}/{}/secret/data/{}/{}/ssh_keys/{}",
+                "{}/{}/secret/{}/{}/ssh_keys/{}",
                 base, api_prefix, prefix, user_id, server_id
             )
         }
@@ -246,7 +246,7 @@ impl VaultClient {
 
         // Return the vault path for storage in database
         let vault_key_path = format!(
-            "secret/data/{}/{}/ssh_keys/{}",
+            "secret/{}/{}/ssh_keys/{}",
            self.ssh_key_path_prefix.trim_matches('/'),
            user_id,
            server_id
diff --git a/src/mcp/registry.rs b/src/mcp/registry.rs
index 6e34ed0c..191266a4 100644
--- a/src/mcp/registry.rs
+++ b/src/mcp/registry.rs
@@ -10,6 +10,12 @@ use std::sync::Arc;
 use super::protocol::{Tool, ToolContent};
 use crate::mcp::tools::{
     AddCloudTool,
+    AdminApproveTemplateTool,
+    AdminGetTemplateDetailTool,
+    AdminListSubmittedTemplatesTool,
+    AdminListTemplateReviewsTool,
+    AdminListTemplateVersionsTool,
+    AdminValidateTemplateSecurityTool,
     ApplyVaultConfigTool,
     CancelDeploymentTool,
     CloneProjectTool,
@@ -20,6 +26,8 @@
     DeleteCloudTool,
     DeleteProjectTool,
     DeleteProxyTool,
+    // Ansible Roles tools
+    DeployRoleTool,
     DiagnoseDeploymentTool,
     DiscoverStackServicesTool,
     EscalateToSupportTool,
@@ -37,11 +45,14 @@
     GetInstallationDetailsTool,
     GetLiveChatInfoTool,
     GetProjectTool,
+    GetRoleDetailsTool,
+    GetRoleRequirementsTool,
     GetServerResourcesTool,
     GetSubscriptionPlanTool,
     GetUserProfileTool,
     // Phase 5: Vault Configuration tools
     GetVaultConfigTool,
+    ListAvailableRolesTool,
     ListCloudsTool,
     ListContainersTool,
     ListInstallationsTool,
@@ -58,10 +69,12 @@
     StartDeploymentTool,
     // Phase 5: Container Operations tools
     StopContainerTool,
+    AdminRejectTemplateTool,
     SuggestResourcesTool,
     UpdateAppDomainTool,
     UpdateAppPortsTool,
     ValidateDomainTool,
+    ValidateRoleVarsTool,
     // Phase 5: Stack Validation tool
     ValidateStackConfigTool,
 };
@@ -190,6 +203,46 @@ impl ToolRegistry {
         registry.register("get_server_resources", Box::new(GetServerResourcesTool));
         registry.register("get_container_exec", Box::new(GetContainerExecTool));
 
+        // Marketplace Admin tools (admin role required)
+        registry.register(
+            "admin_list_submitted_templates",
+            Box::new(AdminListSubmittedTemplatesTool),
+        );
+        registry.register(
+            "admin_get_template_detail",
+            Box::new(AdminGetTemplateDetailTool),
+        );
+        registry.register(
+            "admin_approve_template",
+            Box::new(AdminApproveTemplateTool),
+        );
+        registry.register(
+            "admin_reject_template",
+            Box::new(AdminRejectTemplateTool),
+        );
+        registry.register(
+            "admin_list_template_versions",
+            Box::new(AdminListTemplateVersionsTool),
+        );
+        registry.register(
+            "admin_list_template_reviews",
+            Box::new(AdminListTemplateReviewsTool),
+        );
+        registry.register(
+            "admin_validate_template_security",
+            Box::new(AdminValidateTemplateSecurityTool),
+        );
+
+        // Ansible Roles tools (SSH deployment method)
+        registry.register("list_available_roles", Box::new(ListAvailableRolesTool));
+        registry.register("get_role_details", Box::new(GetRoleDetailsTool));
+        registry.register(
+            "get_role_requirements",
+            Box::new(GetRoleRequirementsTool),
+        );
+        registry.register("validate_role_vars", Box::new(ValidateRoleVarsTool));
+        registry.register("deploy_role", Box::new(DeployRoleTool));
+
         registry
 }
diff --git a/src/mcp/tools/ansible_roles.rs b/src/mcp/tools/ansible_roles.rs
new file mode 100644
index 00000000..6870ec2d
--- /dev/null
+++ b/src/mcp/tools/ansible_roles.rs
@@ -0,0 +1,570 @@
+//! MCP Tools for Ansible Roles Management
+//!
+//! These tools provide AI access to:
+//! - Discover available Ansible roles
+//! - Get role details, requirements, and variables
+//! - Validate role configuration
+//! - Deploy roles to SSH-accessible servers
+//!
+//! Role discovery uses a hybrid approach:
+//! - Primary: Database `role` table via PostgREST
+//! - Fallback: Filesystem scan of tfa/roles/ directory
+//!
+//! Used for SSH deployment method in Stack Builder UI.
+
+use async_trait::async_trait;
+use serde::{Deserialize, Serialize};
+use serde_json::{json, Value};
+use std::collections::HashMap;
+use std::path::{Path, PathBuf};
+
+use crate::mcp::protocol::{Tool, ToolContent};
+use crate::mcp::registry::{ToolContext, ToolHandler};
+
+const ROLES_BASE_PATH: &str = "/ansible/roles";
+const POSTGREST_ROLE_ENDPOINT: &str = "/role";
+
+/// Role metadata structure
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AnsibleRole {
+    pub name: String,
+    pub description: Option<String>,
+    pub public_ports: Vec<u16>,
+    pub private_ports: Vec<u16>,
+    pub variables: HashMap<String, RoleVariable>,
+    pub dependencies: Vec<String>,
+    pub supported_os: Vec<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RoleVariable {
+    pub name: String,
+    pub default_value: Option<String>,
+    pub description: Option<String>,
+    pub required: bool,
+    pub var_type: String, // string, integer, boolean, etc.
+}
+
+/// Fetch roles from database via PostgREST
+async fn fetch_roles_from_db(context: &ToolContext) -> Result<Vec<AnsibleRole>, String> {
+    let user_service_url = &context.settings.user_service_url;
+    let endpoint = format!("{}{}", user_service_url, POSTGREST_ROLE_ENDPOINT);
+
+    let client = reqwest::Client::new();
+    let response = client
+        .get(&endpoint)
+        .header(
+            "Authorization",
+            format!("Bearer {}", context.user.access_token.as_deref().unwrap_or("")),
+        )
+        .send()
+        .await
+        .map_err(|e| format!("Failed to fetch roles from database: {}", e))?;
+
+    if !response.status().is_success() {
+        return Err(format!("Database query failed: {}", response.status()));
+    }
+
+    #[derive(Deserialize)]
+    struct DbRole {
+        name: String,
+        #[serde(default)]
+        public_ports: Vec<u16>,
+        #[serde(default)]
+        private_ports: Vec<u16>,
+    }
+
+    let db_roles: Vec<DbRole> = response
+        .json()
+        .await
+        .map_err(|e| format!("Failed to parse database response: {}", e))?;
+
+    Ok(db_roles
+        .into_iter()
+        .map(|r| AnsibleRole {
+            name: r.name,
+            description: None,
+            public_ports: r.public_ports,
+            private_ports: r.private_ports,
+            variables: HashMap::new(),
+            dependencies: vec![],
+            supported_os: vec![],
+        })
+        .collect())
+}
+
+/// Scan filesystem for available roles
+fn scan_roles_from_filesystem() -> Result<Vec<String>, String> {
+    let roles_path = Path::new(ROLES_BASE_PATH);
+
+    if !roles_path.exists() {
+        return Err(format!("Roles directory not found: {}", ROLES_BASE_PATH));
+    }
+
+    let mut roles = vec![];
+
+    if let Ok(entries) = std::fs::read_dir(roles_path) {
+        for entry in entries.flatten() {
+            if let Ok(file_type) = entry.file_type() {
+                if file_type.is_dir() {
+                    if let Some(name) = entry.file_name().to_str() {
+                        // Skip hidden directories and common non-role dirs
+                        if !name.starts_with('.') && name != "old" && name != "custom" {
+                            roles.push(name.to_string());
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    roles.sort();
+    Ok(roles)
+}
+
+/// Get detailed information about a specific role from filesystem
+fn get_role_details_from_fs(role_name: &str) -> Result<AnsibleRole, String> {
+    let role_path = PathBuf::from(ROLES_BASE_PATH).join(role_name);
+
+    if !role_path.exists() {
+        return Err(format!("Role '{}' not found in filesystem", role_name));
+    }
+
+    let mut role = AnsibleRole {
+        name: role_name.to_string(),
+        description: None,
+        public_ports: vec![],
+        private_ports: vec![],
+        variables: HashMap::new(),
+        dependencies: vec![],
+        supported_os: vec!["ubuntu", "debian"].into_iter().map(|s| s.to_string()).collect(), // default
+    };
+
+    // Parse README.md for description
+    let readme_path = role_path.join("README.md");
+    if readme_path.exists() {
+        if let Ok(content) = std::fs::read_to_string(&readme_path) {
+            // Extract first non-empty line after "Role Name" or "Description"
+            for line in content.lines() {
+                let trimmed = line.trim();
+                if !trimmed.is_empty()
+                    && !trimmed.starts_with('#')
+                    && !trimmed.starts_with('=')
+                    && !trimmed.starts_with('-')
+                    && trimmed.len() > 10 {
+                    role.description = Some(trimmed.to_string());
+                    break;
+                }
+            }
+        }
+    }
+
+    // Parse defaults/main.yml for variables
+    let defaults_path = role_path.join("defaults/main.yml");
+    if defaults_path.exists() {
+        if let Ok(content) = std::fs::read_to_string(&defaults_path) {
+            // Simple YAML parsing for variable names (not a full parser)
+            for line in content.lines() {
+                if let Some((key, value)) = parse_yaml_variable(line) {
+                    role.variables.insert(
+                        key.clone(),
+                        RoleVariable {
+                            name: key,
+                            default_value: Some(value),
+                            description: None,
+                            required: false,
+                            var_type: "string".to_string(),
+                        },
+                    );
+                }
+            }
+        }
+    }
+
+    Ok(role)
+}
+
+/// Simple YAML variable parser (key: value)
+fn parse_yaml_variable(line: &str) -> Option<(String, String)> {
+    let trimmed = line.trim();
+    if trimmed.starts_with('#') || trimmed.starts_with("---") || trimmed.is_empty() {
+        return None;
+    }
+
+    if let Some(colon_pos) = trimmed.find(':') {
+        let key = trimmed[..colon_pos].trim();
+        let value = trimmed[colon_pos + 1..].trim();
+
+        if !key.is_empty() && !value.is_empty() {
+            return Some((key.to_string(), value.to_string()));
+        }
+    }
+
+    None
+}
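+/// Illustrative example (not part of the original patch): lines from a role's
+/// defaults/main.yml and what the simple parser extracts from them.
+#[allow(dead_code)]
+fn parse_yaml_variable_demo() {
+    assert_eq!(
+        parse_yaml_variable("nginx_port: 8080"),
+        Some(("nginx_port".to_string(), "8080".to_string()))
+    );
+    // Comments, document markers, and empty lines are skipped.
+    assert_eq!(parse_yaml_variable("# just a comment"), None);
+    assert_eq!(parse_yaml_variable("---"), None);
+}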
+/// Tool: list_available_roles - Get catalog of all Ansible roles
+pub struct ListAvailableRolesTool;
+
+#[async_trait]
+impl ToolHandler for ListAvailableRolesTool {
+    async fn execute(&self, _args: Value, context: &ToolContext) -> Result<ToolContent, String> {
+        // Try database first
+        let roles = match fetch_roles_from_db(context).await {
+            Ok(db_roles) => {
+                tracing::info!("Fetched {} roles from database", db_roles.len());
+                db_roles
+            }
+            Err(db_err) => {
+                tracing::warn!("Database fetch failed ({}), falling back to filesystem", db_err);
+
+                // Fallback to filesystem scan
+                let role_names = scan_roles_from_filesystem()?;
+                tracing::info!("Scanned {} roles from filesystem", role_names.len());
+
+                role_names
+                    .into_iter()
+                    .map(|name| AnsibleRole {
+                        name,
+                        description: None,
+                        public_ports: vec![],
+                        private_ports: vec![],
+                        variables: HashMap::new(),
+                        dependencies: vec![],
+                        supported_os: vec![],
+                    })
+                    .collect()
+            }
+        };
+
+        let result = json!({
+            "status": "success",
+            "total_roles": roles.len(),
+            "roles": roles.iter().map(|r| json!({
+                "name": r.name,
+                "description": r.description.as_deref().unwrap_or("No description available"),
+                "public_ports": r.public_ports,
+                "private_ports": r.private_ports,
+            })).collect::<Vec<_>>(),
+        });
+
+        Ok(ToolContent::Text {
+            text: serde_json::to_string(&result).unwrap(),
+        })
+    }
+
+    fn schema(&self) -> Tool {
+        Tool {
+            name: "list_available_roles".to_string(),
+            description: "Get a catalog of all available Ansible roles for SSH-based deployments. \
+                Returns role names, descriptions, and port configurations. \
+                Uses database as primary source with filesystem fallback."
+                .to_string(),
+            input_schema: json!({
+                "type": "object",
+                "properties": {},
+                "required": []
+            }),
+        }
+    }
+}
+
+/// Tool: get_role_details - Get detailed info about a specific role
+pub struct GetRoleDetailsTool;
+
+#[async_trait]
+impl ToolHandler for GetRoleDetailsTool {
+    async fn execute(&self, args: Value, _context: &ToolContext) -> Result<ToolContent, String> {
+        #[derive(Deserialize)]
+        struct Args {
+            role_name: String,
+        }
+
+        let params: Args =
+            serde_json::from_value(args).map_err(|e| format!("Invalid arguments: {}", e))?;
+
+        // Get detailed info from filesystem (includes variables, README, etc.)
+        let role = get_role_details_from_fs(&params.role_name)?;
+
+        let result = json!({
+            "status": "success",
+            "role": {
+                "name": role.name,
+                "description": role.description,
+                "public_ports": role.public_ports,
+                "private_ports": role.private_ports,
+                "variables": role.variables,
+                "dependencies": role.dependencies,
+                "supported_os": role.supported_os,
+            }
+        });
+
+        Ok(ToolContent::Text {
+            text: serde_json::to_string(&result).unwrap(),
+        })
+    }
+
+    fn schema(&self) -> Tool {
+        Tool {
+            name: "get_role_details".to_string(),
+            description: "Get detailed information about a specific Ansible role. \
+                Returns description, variables, dependencies, supported OS, and ports. \
+                Parses the role's README.md and defaults/main.yml for metadata."
+                .to_string(),
+            input_schema: json!({
+                "type": "object",
+                "properties": {
+                    "role_name": {
+                        "type": "string",
+                        "description": "Name of the Ansible role (e.g., 'nginx', 'postgres', 'redis')"
+                    }
+                },
+                "required": ["role_name"]
+            }),
+        }
+    }
+}
+
+/// Tool: get_role_requirements - Get role requirements and dependencies
+pub struct GetRoleRequirementsTool;
+
+#[async_trait]
+impl ToolHandler for GetRoleRequirementsTool {
+    async fn execute(&self, args: Value, _context: &ToolContext) -> Result<ToolContent, String> {
+        #[derive(Deserialize)]
+        struct Args {
+            role_name: String,
+        }
+
+        let params: Args =
+            serde_json::from_value(args).map_err(|e| format!("Invalid arguments: {}", e))?;
+
+        let role = get_role_details_from_fs(&params.role_name)?;
+
+        let result = json!({
+            "status": "success",
+            "role_name": role.name,
+            "requirements": {
+                "dependencies": role.dependencies,
+                "supported_os": role.supported_os,
+                "required_variables": role.variables.values()
+                    .filter(|v| v.required)
+                    .map(|v| &v.name)
+                    .collect::<Vec<_>>(),
+                "optional_variables": role.variables.values()
+                    .filter(|v| !v.required)
+                    .map(|v| &v.name)
+                    .collect::<Vec<_>>(),
+                "public_ports": role.public_ports,
+                "private_ports": role.private_ports,
+            }
+        });
+
+        Ok(ToolContent::Text {
+            text: serde_json::to_string(&result).unwrap(),
+        })
+    }
+
+    fn schema(&self) -> Tool {
+        Tool {
+            name: "get_role_requirements".to_string(),
+            description: "Get requirements and dependencies for a specific Ansible role. \
+                Returns OS requirements, dependent roles, required/optional variables, \
+                and port configurations needed for deployment."
+                .to_string(),
+            input_schema: json!({
+                "type": "object",
+                "properties": {
+                    "role_name": {
+                        "type": "string",
+                        "description": "Name of the Ansible role"
+                    }
+                },
+                "required": ["role_name"]
+            }),
+        }
+    }
+}
+
+/// Tool: validate_role_vars - Validate role variable configuration
+pub struct ValidateRoleVarsTool;
+
+#[async_trait]
+impl ToolHandler for ValidateRoleVarsTool {
+    async fn execute(&self, args: Value, _context: &ToolContext) -> Result<ToolContent, String> {
+        #[derive(Deserialize)]
+        struct Args {
+            role_name: String,
+            variables: HashMap<String, Value>,
+        }
+
+        let params: Args =
+            serde_json::from_value(args).map_err(|e| format!("Invalid arguments: {}", e))?;
+
+        let role = get_role_details_from_fs(&params.role_name)?;
+
+        let mut errors = vec![];
+        let mut warnings = vec![];
+
+        // Check required variables
+        for (var_name, var_def) in &role.variables {
+            if var_def.required && !params.variables.contains_key(var_name) {
+                errors.push(format!("Required variable '{}' is missing", var_name));
+            }
+        }
+
+        // Check for unknown variables
+        for user_var in params.variables.keys() {
+            if !role.variables.contains_key(user_var) {
+                warnings.push(format!(
+                    "Variable '{}' is not defined in role defaults (may be unused)",
+                    user_var
+                ));
+            }
+        }
+
+        let is_valid = errors.is_empty();
+
+        let result = json!({
+            "status": if is_valid { "valid" } else { "invalid" },
+            "role_name": role.name,
+            "valid": is_valid,
+            "errors": errors,
+            "warnings": warnings,
+            "validated_variables": params.variables.keys().collect::<Vec<_>>(),
+        });
+
+        Ok(ToolContent::Text {
+            text: serde_json::to_string(&result).unwrap(),
+        })
+    }
+
+    fn schema(&self) -> Tool {
+        Tool {
+            name: "validate_role_vars".to_string(),
+            description: "Validate variable configuration for an Ansible role before deployment. \
+                Checks for required variables, type compatibility, and warns about unknown variables. \
+                Returns validation status with specific errors/warnings."
+                .to_string(),
+            input_schema: json!({
+                "type": "object",
+                "properties": {
+                    "role_name": {
+                        "type": "string",
+                        "description": "Name of the Ansible role"
+                    },
+                    "variables": {
+                        "type": "object",
+                        "description": "Key-value pairs of variables to validate",
+                        "additionalProperties": true
+                    }
+                },
+                "required": ["role_name", "variables"]
+            }),
+        }
+    }
+}
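+/// Illustrative example (not part of the original patch): the arguments an MCP
+/// client could send to validate_role_vars before invoking deploy_role. The
+/// role and variable names are placeholders.
+#[allow(dead_code)]
+fn example_validate_role_vars_args() -> Value {
+    json!({
+        "role_name": "nginx",
+        "variables": {
+            "nginx_port": 8080,
+            "server_name": "example.com"
+        }
+    })
+}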
+/// Tool: deploy_role - Execute Ansible role on remote server via SSH
+pub struct DeployRoleTool;
+
+#[async_trait]
+impl ToolHandler for DeployRoleTool {
+    async fn execute(&self, args: Value, _context: &ToolContext) -> Result<ToolContent, String> {
+        #[derive(Deserialize)]
+        struct Args {
+            server_ip: String,
+            role_name: String,
+            variables: HashMap<String, Value>,
+            #[serde(default)]
+            ssh_user: Option<String>,
+            #[serde(default)]
+            ssh_key_path: Option<String>,
+        }
+
+        let params: Args =
+            serde_json::from_value(args).map_err(|e| format!("Invalid arguments: {}", e))?;
+
+        // Validate that the role exists
+        let role = get_role_details_from_fs(&params.role_name)?;
+
+        // Validate variables
+        let mut errors = vec![];
+        for (var_name, var_def) in &role.variables {
+            if var_def.required && !params.variables.contains_key(var_name) {
+                errors.push(format!("Required variable '{}' is missing", var_name));
+            }
+        }
+
+        if !errors.is_empty() {
+            return Ok(ToolContent::Text {
+                text: serde_json::to_string(&json!({
+                    "status": "validation_failed",
+                    "errors": errors,
+                }))
+                .unwrap(),
+            });
+        }
+
+        // TODO: Implement actual Ansible playbook execution
+        // This would interface with the Install Service or execute ansible-playbook directly
+        // For now, return a placeholder response
+
+        let ssh_user = params.ssh_user.unwrap_or_else(|| "root".to_string());
+        let ssh_key = params.ssh_key_path.unwrap_or_else(|| "/root/.ssh/id_rsa".to_string());
+
+        let result = json!({
+            "status": "queued",
+            "message": "Role deployment has been queued for execution",
+            "deployment": {
+                "role_name": role.name,
+                "server_ip": params.server_ip,
+                "ssh_user": ssh_user,
+                "ssh_key_path": ssh_key,
+                "variables": params.variables,
+            },
+            "note": "This tool currently queues the deployment. Integration with Install Service pending."
+        });
+
+        Ok(ToolContent::Text {
+            text: serde_json::to_string(&result).unwrap(),
+        })
+    }
+
+    fn schema(&self) -> Tool {
+        Tool {
+            name: "deploy_role".to_string(),
+            description: "Deploy an Ansible role to a remote server via SSH. \
+                Validates configuration, generates a playbook, and executes it on the target. \
+                Requires SSH access credentials (key-based authentication). \
+                Used for SSH deployment method in Stack Builder."
+                .to_string(),
+            input_schema: json!({
+                "type": "object",
+                "properties": {
+                    "server_ip": {
+                        "type": "string",
+                        "description": "Target server IP address or hostname"
+                    },
+                    "role_name": {
+                        "type": "string",
+                        "description": "Name of the Ansible role to deploy"
+                    },
+                    "variables": {
+                        "type": "object",
+                        "description": "Role variables (key-value pairs)",
+                        "additionalProperties": true
+                    },
+                    "ssh_user": {
+                        "type": "string",
+                        "description": "SSH username (default: 'root')",
+                        "default": "root"
+                    },
+                    "ssh_key_path": {
+                        "type": "string",
+                        "description": "Path to SSH private key (default: '/root/.ssh/id_rsa')",
+                        "default": "/root/.ssh/id_rsa"
+                    }
+                },
+                "required": ["server_ip", "role_name", "variables"]
+            }),
+        }
+    }
+}
diff --git a/src/mcp/tools/cloud.rs b/src/mcp/tools/cloud.rs
index 6729c0bb..23222848 100644
--- a/src/mcp/tools/cloud.rs
+++ b/src/mcp/tools/cloud.rs
@@ -108,7 +108,7 @@ impl ToolHandler for DeleteCloudTool {
         let args: Args =
             serde_json::from_value(args).map_err(|e| format!("Invalid arguments: {}", e))?;
 
-        let cloud = db::cloud::fetch(&context.pg_pool, args.id)
+        let _cloud = db::cloud::fetch(&context.pg_pool, args.id)
             .await
             .map_err(|e| format!("Cloud error: {}", e))?
             .ok_or_else(|| "Cloud not found".to_string())?;
diff --git a/src/mcp/tools/marketplace_admin.rs b/src/mcp/tools/marketplace_admin.rs
new file mode 100644
index 00000000..64a63617
--- /dev/null
+++ b/src/mcp/tools/marketplace_admin.rs
@@ -0,0 +1,502 @@
+use async_trait::async_trait;
+use serde_json::{json, Value};
+
+use crate::db;
+use crate::helpers::security_validator;
+use crate::mcp::protocol::{Tool, ToolContent};
+use crate::mcp::registry::{ToolContext, ToolHandler};
+use serde::Deserialize;
+
+fn require_admin(context: &ToolContext) -> Result<(), String> {
+    let role = context.user.role.as_str();
+    if role != "admin_service" && role != "group_admin" && role != "root" {
+        return Err("Access denied: admin role required".to_string());
+    }
+    Ok(())
+}
+
+/// List submitted marketplace templates awaiting admin review
+pub struct AdminListSubmittedTemplatesTool;
+
+#[async_trait]
+impl ToolHandler for AdminListSubmittedTemplatesTool {
+    async fn execute(&self, _args: Value, context: &ToolContext) -> Result<ToolContent, String> {
+        require_admin(context)?;
+
+        let templates = db::marketplace::admin_list_submitted(&context.pg_pool)
+            .await
+            .map_err(|e| format!("Database error: {}", e))?;
+
+        let result = json!({
+            "count": templates.len(),
+            "templates": templates,
+        });
+
+        tracing::info!("Admin listed {} submitted templates", templates.len());
+
+        Ok(ToolContent::Text {
+            text: serde_json::to_string(&result).unwrap(),
+        })
+    }
+
+    fn schema(&self) -> Tool {
+        Tool {
+            name: "admin_list_submitted_templates".to_string(),
+            description: "List marketplace templates submitted for review. Returns templates with status 'submitted' awaiting admin approval or rejection.".to_string(),
+            input_schema: json!({
+                "type": "object",
+                "properties": {},
+                "required": []
+            }),
+        }
+    }
+}
+
+/// Get detailed information about a specific marketplace template including versions and reviews
+pub struct AdminGetTemplateDetailTool;
+
+#[async_trait]
+impl ToolHandler for AdminGetTemplateDetailTool {
+    async fn execute(&self, args: Value, context: &ToolContext) -> Result<ToolContent, String> {
+        require_admin(context)?;
+
+        #[derive(Deserialize)]
+        struct Args {
+            template_id: String,
+        }
+
+        let params: Args =
+            serde_json::from_value(args).map_err(|e| format!("Invalid arguments: {}", e))?;
+
+        let id = uuid::Uuid::parse_str(&params.template_id)
+            .map_err(|_| "Invalid UUID format for template_id".to_string())?;
+
+        let template = db::marketplace::get_by_id(&context.pg_pool, id)
+            .await
+            .map_err(|e| format!("Database error: {}", e))?
+            .ok_or_else(|| "Template not found".to_string())?;
+
+        let versions = db::marketplace::list_versions_by_template(&context.pg_pool, id)
+            .await
+            .map_err(|e| format!("Database error fetching versions: {}", e))?;
+
+        let reviews = db::marketplace::list_reviews_by_template(&context.pg_pool, id)
+            .await
+            .map_err(|e| format!("Database error fetching reviews: {}", e))?;
+
+        let result = json!({
+            "template": template,
+            "versions": versions,
+            "reviews": reviews,
+        });
+
+        tracing::info!(
+            "Admin fetched detail for template {} ({} versions, {} reviews)",
+            id,
+            versions.len(),
+            reviews.len()
+        );
+
+        Ok(ToolContent::Text {
+            text: serde_json::to_string(&result).unwrap(),
+        })
+    }
+
+    fn schema(&self) -> Tool {
+        Tool {
+            name: "admin_get_template_detail".to_string(),
+            description: "Get full details of a marketplace template including all versions (with stack_definition, changelog) and review history (decisions, reasons, security checklist).".to_string(),
+            input_schema: json!({
+                "type": "object",
+                "properties": {
+                    "template_id": {
+                        "type": "string",
+                        "description": "UUID of the template to inspect"
+                    }
+                },
+                "required": ["template_id"]
+            }),
+        }
+    }
+}
+
+/// Approve a submitted marketplace template
+pub struct AdminApproveTemplateTool;
+
+#[async_trait]
+impl ToolHandler for AdminApproveTemplateTool {
+    async fn execute(&self, args: Value, context: &ToolContext) -> Result<ToolContent, String> {
+        require_admin(context)?;
+
+        #[derive(Deserialize)]
+        struct Args {
+            template_id: String,
+            #[serde(default)]
+            reason: Option<String>,
+        }
+
+        let params: Args =
+            serde_json::from_value(args).map_err(|e| format!("Invalid arguments: {}", e))?;
+
+        let id = uuid::Uuid::parse_str(&params.template_id)
+            .map_err(|_| "Invalid UUID format for template_id".to_string())?;
+
+        let updated = db::marketplace::admin_decide(
+            &context.pg_pool,
+            &id,
+            &context.user.id,
+            "approved",
+            params.reason.as_deref(),
+        )
+        .await
+        .map_err(|e| format!("Database error: {}", e))?;
+
+        if !updated {
+            return Err("Template not found or not in a reviewable state".to_string());
+        }
+
+        tracing::info!("Admin {} approved template {}", context.user.id, id);
+
+        let result = json!({
+            "template_id": params.template_id,
+            "decision": "approved",
+            "message": "Template has been approved. A product record will be auto-created by database trigger.",
+        });
+
+        Ok(ToolContent::Text {
+            text: serde_json::to_string(&result).unwrap(),
+        })
+    }
+
+    fn schema(&self) -> Tool {
+        Tool {
+            name: "admin_approve_template".to_string(),
+            description: "Approve a submitted marketplace template. This changes the template status to 'approved' and triggers automatic product creation.".to_string(),
+            input_schema: json!({
+                "type": "object",
+                "properties": {
+                    "template_id": {
+                        "type": "string",
+                        "description": "UUID of the template to approve"
+                    },
+                    "reason": {
+                        "type": "string",
+                        "description": "Optional approval note/comment"
+                    }
+                },
+                "required": ["template_id"]
+            }),
+        }
+    }
+}
The template creator will be notified of the rejection.".to_string(), + input_schema: json!({ + "type": "object", + "properties": { + "template_id": { + "type": "string", + "description": "UUID of the template to reject" + }, + "reason": { + "type": "string", + "description": "Reason for rejection (required, shown to template creator)" + } + }, + "required": ["template_id", "reason"] + }), + } + } +} + +/// List all versions of a specific marketplace template +pub struct AdminListTemplateVersionsTool; + +#[async_trait] +impl ToolHandler for AdminListTemplateVersionsTool { + async fn execute(&self, args: Value, context: &ToolContext) -> Result { + require_admin(context)?; + + #[derive(Deserialize)] + struct Args { + template_id: String, + } + + let params: Args = + serde_json::from_value(args).map_err(|e| format!("Invalid arguments: {}", e))?; + + let id = uuid::Uuid::parse_str(¶ms.template_id) + .map_err(|_| "Invalid UUID format for template_id".to_string())?; + + let versions = db::marketplace::list_versions_by_template(&context.pg_pool, id) + .await + .map_err(|e| format!("Database error: {}", e))?; + + let result = json!({ + "template_id": params.template_id, + "count": versions.len(), + "versions": versions, + }); + + tracing::info!( + "Admin listed {} versions for template {}", + versions.len(), + id + ); + + Ok(ToolContent::Text { + text: serde_json::to_string(&result).unwrap(), + }) + } + + fn schema(&self) -> Tool { + Tool { + name: "admin_list_template_versions".to_string(), + description: "List all versions of a marketplace template including stack_definition, changelog, and version metadata.".to_string(), + input_schema: json!({ + "type": "object", + "properties": { + "template_id": { + "type": "string", + "description": "UUID of the template" + } + }, + "required": ["template_id"] + }), + } + } +} + +/// List review history for a marketplace template +pub struct AdminListTemplateReviewsTool; + +#[async_trait] +impl ToolHandler for AdminListTemplateReviewsTool { + async fn execute(&self, args: Value, context: &ToolContext) -> Result { + require_admin(context)?; + + #[derive(Deserialize)] + struct Args { + template_id: String, + } + + let params: Args = + serde_json::from_value(args).map_err(|e| format!("Invalid arguments: {}", e))?; + + let id = uuid::Uuid::parse_str(¶ms.template_id) + .map_err(|_| "Invalid UUID format for template_id".to_string())?; + + let reviews = db::marketplace::list_reviews_by_template(&context.pg_pool, id) + .await + .map_err(|e| format!("Database error: {}", e))?; + + let result = json!({ + "template_id": params.template_id, + "count": reviews.len(), + "reviews": reviews, + }); + + tracing::info!( + "Admin listed {} reviews for template {}", + reviews.len(), + id + ); + + Ok(ToolContent::Text { + text: serde_json::to_string(&result).unwrap(), + }) + } + + fn schema(&self) -> Tool { + Tool { + name: "admin_list_template_reviews".to_string(), + description: "List the review history of a marketplace template including past decisions, reasons, reviewer info, and security checklist results.".to_string(), + input_schema: json!({ + "type": "object", + "properties": { + "template_id": { + "type": "string", + "description": "UUID of the template" + } + }, + "required": ["template_id"] + }), + } + } +} + +/// Run automated security validation on a marketplace template's stack definition. +/// Returns the full security report AND the raw stack_definition for AI to perform +/// deeper analysis beyond what automated rules can catch. 
+/// Run automated security validation on a marketplace template's stack definition.
+/// Returns the full security report AND the raw stack_definition for AI to perform
+/// deeper analysis beyond what automated rules can catch.
+pub struct AdminValidateTemplateSecurityTool;
+
+#[async_trait]
+impl ToolHandler for AdminValidateTemplateSecurityTool {
+    async fn execute(&self, args: Value, context: &ToolContext) -> Result<ToolContent, String> {
+        require_admin(context)?;
+
+        #[derive(Deserialize)]
+        struct Args {
+            template_id: String,
+            /// If true, save the scan result as a review record
+            #[serde(default)]
+            save_report: Option<bool>,
+        }
+
+        let params: Args =
+            serde_json::from_value(args).map_err(|e| format!("Invalid arguments: {}", e))?;
+
+        let id = uuid::Uuid::parse_str(&params.template_id)
+            .map_err(|_| "Invalid UUID format for template_id".to_string())?;
+
+        // Fetch template
+        let template = db::marketplace::get_by_id(&context.pg_pool, id)
+            .await
+            .map_err(|e| format!("Database error: {}", e))?
+            .ok_or_else(|| "Template not found".to_string())?;
+
+        // Fetch latest version with stack_definition
+        let versions = db::marketplace::list_versions_by_template(&context.pg_pool, id)
+            .await
+            .map_err(|e| format!("Database error: {}", e))?;
+
+        let latest = versions
+            .iter()
+            .find(|v| v.is_latest == Some(true))
+            .or_else(|| versions.first())
+            .ok_or_else(|| "No versions found for this template — nothing to scan".to_string())?;
+
+        // Run automated security checks
+        let report = security_validator::validate_stack_security(&latest.stack_definition);
+
+        // Optionally save the scan result as a review record
+        let saved_review = if params.save_report.unwrap_or(true) {
+            let review = db::marketplace::save_security_scan(
+                &context.pg_pool,
+                &id,
+                &context.user.id,
+                report.to_checklist_json(),
+            )
+            .await
+            .map_err(|e| format!("Failed to save security report: {}", e))?;
+            Some(review.id.to_string())
+        } else {
+            None
+        };
+
+        tracing::info!(
+            "Security scan for template {}: overall_passed={}, risk_score={}",
+            id,
+            report.overall_passed,
+            report.risk_score
+        );
+
+        // Return both the automated report AND the raw stack_definition
+        // so the AI agent can perform deeper semantic analysis
+        let result = json!({
+            "template": {
+                "id": template.id,
+                "name": template.name,
+                "status": template.status,
+                "creator_name": template.creator_name,
+            },
+            "version": {
+                "version": latest.version,
+                "definition_format": latest.definition_format,
+            },
+            "automated_scan": {
+                "overall_passed": report.overall_passed,
+                "risk_score": report.risk_score,
+                "no_secrets": report.no_secrets,
+                "no_hardcoded_creds": report.no_hardcoded_creds,
+                "valid_docker_syntax": report.valid_docker_syntax,
+                "no_malicious_code": report.no_malicious_code,
+                "recommendations": report.recommendations,
+            },
+            "saved_review_id": saved_review,
+            "stack_definition_for_ai_review": latest.stack_definition,
+            "ai_review_instructions": "The automated scan above covers pattern-based checks. As an AI reviewer, please additionally analyze: 1) Whether the service architecture makes sense and is secure, 2) If environment variables have sensible defaults, 3) If there are any data exfiltration risks, 4) If resource limits are appropriate, 5) If the network topology is secure (unnecessary exposed ports), 6) Any other security concerns that static analysis cannot catch.",
+        });
+
+        Ok(ToolContent::Text {
+            text: serde_json::to_string(&result).unwrap(),
+        })
+    }
+
+    fn schema(&self) -> Tool {
+        Tool {
+            name: "admin_validate_template_security".to_string(),
+            description: "Run automated security validation on a template's stack definition. Checks for hardcoded secrets, credentials, Docker syntax issues, and malicious patterns (privileged containers, host mounts, crypto miners).
Returns both the automated scan report and the raw stack_definition for AI to perform deeper semantic security analysis. Saves the security checklist to the review history.".to_string(), + input_schema: json!({ + "type": "object", + "properties": { + "template_id": { + "type": "string", + "description": "UUID of the template to security-scan" + }, + "save_report": { + "type": "boolean", + "description": "Whether to save the scan result as a review record (default: true)" + } + }, + "required": ["template_id"] + }), + } + } +} diff --git a/src/mcp/tools/mod.rs b/src/mcp/tools/mod.rs index d98e4ea4..cc06cc0d 100644 --- a/src/mcp/tools/mod.rs +++ b/src/mcp/tools/mod.rs @@ -1,7 +1,9 @@ +pub mod ansible_roles; pub mod cloud; pub mod compose; pub mod config; pub mod deployment; +pub mod marketplace_admin; pub mod monitoring; pub mod project; pub mod proxy; @@ -9,10 +11,12 @@ pub mod support; pub mod templates; pub mod user_service; +pub use ansible_roles::*; pub use cloud::*; pub use compose::*; pub use config::*; pub use deployment::*; +pub use marketplace_admin::*; pub use monitoring::*; pub use project::*; pub use proxy::*; diff --git a/src/mcp/tools/monitoring.rs b/src/mcp/tools/monitoring.rs index 4a7da122..b1167f6f 100644 --- a/src/mcp/tools/monitoring.rs +++ b/src/mcp/tools/monitoring.rs @@ -464,7 +464,7 @@ impl ToolHandler for DiagnoseDeploymentTool { let deployment_hash = info.deployment_hash.clone(); let mut status = info.status; let mut domain = info.domain; - let mut server_ip = info.server_ip; + let server_ip = info.server_ip; let mut apps_info: Option = info.apps.as_ref().map(|apps| { json!(apps .iter() diff --git a/src/mcp/tools/project.rs b/src/mcp/tools/project.rs index ab8b2a7c..9d2e5a6e 100644 --- a/src/mcp/tools/project.rs +++ b/src/mcp/tools/project.rs @@ -290,38 +290,32 @@ impl ToolHandler for CreateProjectAppTool { let mut resolved_image = params.image.unwrap_or_default().trim().to_string(); let mut resolved_name = params.name.clone(); let mut resolved_ports = params.ports.clone(); - - if resolved_image.is_empty() || resolved_name.is_none() || resolved_ports.is_none() { + let mut resolved_env = params.env.clone(); + let mut resolved_config_files = params.config_files.clone(); + + // Use enriched catalog endpoint for correct Docker image + default configs + if resolved_image.is_empty() + || resolved_name.is_none() + || resolved_ports.is_none() + || resolved_env.is_none() + { let client = UserServiceClient::new_public(&context.settings.user_service_url); let token = context.user.access_token.as_deref().unwrap_or(""); - let apps = client - .search_applications(token, Some(code)) - .await - .map_err(|e| format!("Failed to search applications: {}", e))?; - - let code_lower = code.to_lowercase(); - let matched = apps - .iter() - .find(|app| { - app.code - .as_deref() - .map(|c| c.to_lowercase() == code_lower) - .unwrap_or(false) - }) - .or_else(|| { - apps.iter().find(|app| { - app.name - .as_deref() - .map(|n| n.to_lowercase() == code_lower) - .unwrap_or(false) - }) - }) - .or_else(|| apps.first()); - - if let Some(app) = matched { + + // Try catalog endpoint first (has correct Docker image + default env/config) + // Gracefully handle total failure — proceed with defaults if User Service is unreachable + let catalog_app = match client.fetch_app_catalog(token, code).await { + Ok(app) => app, + Err(e) => { + tracing::warn!("Could not fetch app catalog for code={}: {}, proceeding with defaults", code, e); + None + } + }; + + if let Some(app) = catalog_app { if 
resolved_image.is_empty() { - if let Some(image) = app.docker_image.clone() { - resolved_image = image; + if let Some(image) = app.docker_image.as_ref().filter(|s| !s.is_empty()) { + resolved_image = image.clone(); } } @@ -332,9 +326,66 @@ impl ToolHandler for CreateProjectAppTool { } if resolved_ports.is_none() { - if let Some(port) = app.default_port { - if port > 0 { - resolved_ports = Some(json!([format!("{0}:{0}", port)])); + // Prefer default_ports (structured) from catalog + if let Some(ports) = &app.default_ports { + if let Some(arr) = ports.as_array() { + if !arr.is_empty() { + let port_strings: Vec = arr + .iter() + .filter_map(|p| { + let port = p + .get("port") + .and_then(|v| v.as_i64()) + .or_else(|| p.as_i64()); + port.map(|p| { + serde_json::Value::String(format!("{0}:{0}", p)) + }) + }) + .collect(); + if !port_strings.is_empty() { + resolved_ports = Some(json!(port_strings)); + } + } + } + } + // Fallback to default_port scalar + if resolved_ports.is_none() { + if let Some(port) = app.default_port { + if port > 0 { + resolved_ports = Some(json!([format!("{0}:{0}", port)])); + } + } + } + } + + // Populate default environment from catalog if not provided by user + if resolved_env.is_none() { + if let Some(env_obj) = &app.default_env { + if let Some(obj) = env_obj.as_object() { + if !obj.is_empty() { + // Convert { "KEY": "value" } to [{ "name": "KEY", "value": "value" }] + let env_arr: Vec = obj + .iter() + .map(|(k, v)| { + json!({ + "name": k, + "value": v.as_str().unwrap_or("") + }) + }) + .collect(); + resolved_env = Some(json!(env_arr)); + } + } + } + } + + // Populate default config_files from catalog if not provided + if resolved_config_files.is_none() { + if let Some(cf) = &app.default_config_files { + if let Some(arr) = cf.as_array() { + if !arr.is_empty() { + resolved_config_files = Some(cf.clone()); + } } } } @@ -350,7 +401,7 @@ impl ToolHandler for CreateProjectAppTool { app.code = code.to_string(); app.name = resolved_name.unwrap_or_else(|| code.to_string()); app.image = resolved_image; - app.environment = params.env.clone(); + app.environment = resolved_env; app.ports = resolved_ports; app.volumes = params.volumes.clone(); app.domain = params.domain.clone(); @@ -366,7 +417,7 @@ impl ToolHandler for CreateProjectAppTool { app.enabled = params.enabled.or(Some(true)); app.deploy_order = params.deploy_order; - if let Some(config_files) = params.config_files.clone() { + if let Some(config_files) = resolved_config_files { let mut labels = app.labels.clone().unwrap_or(json!({})); if let Some(obj) = labels.as_object_mut() { obj.insert("config_files".to_string(), config_files); @@ -670,7 +721,7 @@ impl ToolHandler for GetDeploymentResourcesTool { .map_err(|e| format!("Failed to lookup deployment: {}", e))? 
.ok_or_else(|| "Deployment not found".to_string())?; deployment.project_id - } else if let Some(deployment_id) = params.deployment_id { + } else if let Some(_deployment_id) = params.deployment_id { // Legacy: try to find project by deployment ID // This would need a User Service lookup - for now return error return Err("Please provide deployment_hash or project_id".to_string()); diff --git a/src/mcp/tools/support.rs b/src/mcp/tools/support.rs index 05839197..f1eb0b03 100644 --- a/src/mcp/tools/support.rs +++ b/src/mcp/tools/support.rs @@ -98,7 +98,7 @@ impl ToolHandler for EscalateToSupportTool { // Store escalation record let escalation_id = uuid::Uuid::new_v4().to_string(); - let escalation_record = json!({ + let _escalation_record = json!({ "id": escalation_id, "user_id": context.user.id, "reason": params.reason, diff --git a/src/mcp/tools/templates.rs b/src/mcp/tools/templates.rs index 16dafba9..96e52fbf 100644 --- a/src/mcp/tools/templates.rs +++ b/src/mcp/tools/templates.rs @@ -117,7 +117,7 @@ pub struct ListTemplatesTool; #[async_trait] impl ToolHandler for ListTemplatesTool { - async fn execute(&self, args: Value, context: &ToolContext) -> Result { + async fn execute(&self, args: Value, _context: &ToolContext) -> Result { #[derive(Deserialize)] struct Args { #[serde(default)] diff --git a/src/middleware/authentication/manager_middleware.rs b/src/middleware/authentication/manager_middleware.rs index 32251fbe..0864ce59 100644 --- a/src/middleware/authentication/manager_middleware.rs +++ b/src/middleware/authentication/manager_middleware.rs @@ -28,7 +28,7 @@ where type Future = LocalBoxFuture<'static, Result, Error>>; fn poll_ready(&self, ctx: &mut Context<'_>) -> Poll> { - if let Ok(mut service) = self.service.try_borrow_mut() { + if let Ok(service) = self.service.try_borrow_mut() { service.poll_ready(ctx) } else { Poll::Pending diff --git a/src/middleware/authentication/method/f_jwt.rs b/src/middleware/authentication/method/f_jwt.rs index 34b073ed..eeb44496 100644 --- a/src/middleware/authentication/method/f_jwt.rs +++ b/src/middleware/authentication/method/f_jwt.rs @@ -2,7 +2,6 @@ use crate::connectors::{ extract_bearer_token, parse_jwt_claims, user_from_jwt_claims, validate_jwt_expiration, }; use crate::middleware::authentication::get_header; -use crate::models; use actix_web::dev::ServiceRequest; use actix_web::HttpMessage; use std::sync::Arc; diff --git a/src/middleware/authorization.rs b/src/middleware/authorization.rs index c2b39fd2..5769df46 100644 --- a/src/middleware/authorization.rs +++ b/src/middleware/authorization.rs @@ -1,4 +1,3 @@ -use crate::configuration::parse_bool_env; use actix_casbin_auth::{ casbin::{function_map::key_match2, CoreApi, DefaultModel}, CasbinService, @@ -35,7 +34,7 @@ pub async fn try_new(db_connection_address: String) -> Result, pub deploy_count: Option, pub required_plan_name: Option, + pub price: Option, + pub billing_cycle: Option, + pub currency: Option, pub created_at: Option>, pub updated_at: Option>, pub approved_at: Option>, @@ -44,3 +47,15 @@ pub struct StackTemplateVersion { pub is_latest: Option, pub created_at: Option>, } + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default, sqlx::FromRow)] +pub struct StackTemplateReview { + pub id: Uuid, + pub template_id: Uuid, + pub reviewer_user_id: Option, + pub decision: String, + pub review_reason: Option, + pub security_checklist: Option, + pub submitted_at: Option>, + pub reviewed_at: Option>, +} diff --git a/src/models/project_app.rs b/src/models/project_app.rs index 
a9657f30..6882056c 100644 --- a/src/models/project_app.rs +++ b/src/models/project_app.rs @@ -95,6 +95,9 @@ pub struct ProjectApp { /// When set, this app is a child service discovered from parent's compose file #[sqlx(default)] pub parent_app_code: Option, + /// Deployment this app belongs to. NULL for legacy apps created before deployment scoping. + #[sqlx(default)] + pub deployment_id: Option, } impl ProjectApp { @@ -131,6 +134,7 @@ impl ProjectApp { vault_sync_version: None, config_hash: None, parent_app_code: None, + deployment_id: None, } } @@ -201,6 +205,7 @@ impl Default for ProjectApp { vault_sync_version: None, config_hash: None, parent_app_code: None, + deployment_id: None, } } } diff --git a/src/models/server.rs b/src/models/server.rs index ec53c5a7..57fb2523 100644 --- a/src/models/server.rs +++ b/src/models/server.rs @@ -7,6 +7,8 @@ pub struct Server { pub id: i32, pub user_id: String, pub project_id: i32, + /// Reference to the cloud provider (DO, Hetzner, AWS, etc.) + pub cloud_id: Option, #[validate(min_length = 2)] #[validate(max_length = 50)] pub region: Option, @@ -52,6 +54,7 @@ impl Default for Server { id: 0, user_id: String::new(), project_id: 0, + cloud_id: None, region: None, zone: None, server: None, @@ -77,3 +80,55 @@ fn default_connection_mode() -> String { fn default_key_status() -> String { "none".to_string() } + +/// Server with provider information for API responses +/// Used when we need to show the cloud provider name alongside server data +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ServerWithProvider { + pub id: i32, + pub user_id: String, + pub project_id: i32, + pub cloud_id: Option, + /// Cloud provider name (e.g., "digital_ocean", "hetzner", "aws") + pub cloud: Option, + pub region: Option, + pub zone: Option, + pub server: Option, + pub os: Option, + pub disk_type: Option, + pub created_at: DateTime, + pub updated_at: DateTime, + pub srv_ip: Option, + pub ssh_port: Option, + pub ssh_user: Option, + pub vault_key_path: Option, + pub connection_mode: String, + pub key_status: String, + pub name: Option, +} + +impl From for ServerWithProvider { + fn from(server: Server) -> Self { + Self { + id: server.id, + user_id: server.user_id, + project_id: server.project_id, + cloud_id: server.cloud_id, + cloud: None, // Will be populated by the query + region: server.region, + zone: server.zone, + server: server.server, + os: server.os, + disk_type: server.disk_type, + created_at: server.created_at, + updated_at: server.updated_at, + srv_ip: server.srv_ip, + ssh_port: server.ssh_port, + ssh_user: server.ssh_user, + vault_key_path: server.vault_key_path, + connection_mode: server.connection_mode, + key_status: server.key_status, + name: server.name, + } + } +} diff --git a/src/project_app/hydration.rs b/src/project_app/hydration.rs new file mode 100644 index 00000000..960e9474 --- /dev/null +++ b/src/project_app/hydration.rs @@ -0,0 +1,319 @@ +pub use hydrate::{hydrate_project_app, hydrate_single_app, HydratedProjectApp}; + +mod hydrate { + use actix_web::Error; + use serde_json::{json, Value}; + use sqlx::PgPool; + + use crate::helpers::JsonResponse; + use crate::models::{Project, ProjectApp}; + use crate::services::{AppConfig, ProjectAppService, VaultError, VaultService}; + + #[derive(Debug, Clone, serde::Serialize)] + pub struct ConfigFile { + pub name: String, + pub content: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub template_path: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub 
destination_path: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub file_mode: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub owner: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub group: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub is_ansible: Option, + } + + #[derive(Debug, Clone, serde::Serialize)] + pub struct HydratedProjectApp { + pub id: i32, + pub project_id: i32, + pub code: String, + pub name: String, + pub image: String, + pub environment: Value, + pub ports: Value, + pub volumes: Value, + pub domain: Option, + pub ssl_enabled: bool, + pub resources: Value, + pub restart_policy: String, + pub command: Option, + pub entrypoint: Option, + pub networks: Value, + pub depends_on: Value, + pub healthcheck: Value, + pub labels: Value, + pub config_files: Vec, + pub compose: Option, + pub template_source: Option, + pub enabled: bool, + pub deploy_order: Option, + pub created_at: chrono::DateTime, + pub updated_at: chrono::DateTime, + pub parent_app_code: Option, + } + + impl HydratedProjectApp { + fn from_project_app(app: ProjectApp) -> Self { + Self { + id: app.id, + project_id: app.project_id, + code: app.code, + name: app.name, + image: app.image, + environment: app.environment.unwrap_or(json!({})), + ports: app.ports.unwrap_or(json!([])), + volumes: app.volumes.unwrap_or(json!([])), + domain: app.domain, + ssl_enabled: app.ssl_enabled.unwrap_or(false), + resources: app.resources.unwrap_or(json!({})), + restart_policy: app + .restart_policy + .unwrap_or_else(|| "unless-stopped".to_string()), + command: app.command, + entrypoint: app.entrypoint, + networks: app.networks.unwrap_or(json!([])), + depends_on: app.depends_on.unwrap_or(json!([])), + healthcheck: app.healthcheck.unwrap_or(json!({})), + labels: app.labels.unwrap_or(json!({})), + config_files: Vec::new(), + compose: None, + template_source: app.template_source, + enabled: app.enabled.unwrap_or(true), + deploy_order: app.deploy_order, + created_at: app.created_at, + updated_at: app.updated_at, + parent_app_code: app.parent_app_code, + } + } + } + + pub async fn hydrate_project_app( + pool: &PgPool, + project: &Project, + app: ProjectApp, + ) -> Result { + hydrate_single_app(pool, project, app).await + } + + pub async fn hydrate_single_app( + _pool: &PgPool, + project: &Project, + app: ProjectApp, + ) -> Result { + let mut hydrated = HydratedProjectApp::from_project_app(app.clone()); + let mut compose_config: Option = None; + let mut env_config: Option = None; + + if !hydrated.networks.is_array() + || hydrated + .networks + .as_array() + .map(|a| a.is_empty()) + .unwrap_or(true) + { + hydrated.networks = json!([]); + } + + if let Some(default_network) = ProjectAppService::default_network_from_project(project) { + if hydrated + .networks + .as_array() + .map(|arr| arr.is_empty()) + .unwrap_or(true) + { + hydrated.networks = json!([default_network]); + } + } + + let deployment_hash = project + .request_json + .get("report") + .and_then(|r| r.get("deployment_hash")) + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + + if let Some(hash) = deployment_hash { + if let Ok(vault) = VaultService::from_env() { + if let Some(vault) = vault { + if let Some(compose) = fetch_optional_config(&vault, &hash, &app.code).await? { + hydrated.compose = Some(compose.content.clone()); + compose_config = Some(compose); + } + + if let Some(config) = + fetch_optional_config(&vault, &hash, &format!("{}_env", app.code)).await? 
+ { + hydrated.environment = parse_env_to_json(&config.content); + env_config = Some(config); + } + + if let Some(config_bundle) = fetch_optional_config(&vault, &hash, &format!("{}_configs", app.code)) + .await? + { + hydrated.config_files = parse_config_bundle(&config_bundle.content); + } + } + } + } + + if hydrated.config_files.is_empty() { + if let Some(config_files) = app.config_files.and_then(|c| c.as_array().cloned()) { + hydrated.config_files = config_files + .into_iter() + .filter_map(|file| { + let name = file.get("name").and_then(|v| v.as_str())?.to_string(); + let content = file.get("content").and_then(|v| v.as_str())?.to_string(); + Some(ConfigFile { + name, + content, + template_path: file + .get("template_path") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()), + destination_path: file + .get("destination_path") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()), + file_mode: file + .get("file_mode") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()), + owner: file + .get("owner") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()), + group: file + .get("group") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()), + is_ansible: file.get("is_ansible").and_then(|v| v.as_bool()), + }) + }) + .collect(); + } + } + + if let Some(config) = env_config { + let env_name = file_name_from_path(&config.destination_path, ".env"); + push_config_file_if_missing(&mut hydrated.config_files, &env_name, &config); + } + + if let Some(config) = compose_config { + let compose_name = file_name_from_path(&config.destination_path, "docker-compose.yml"); + push_config_file_if_missing(&mut hydrated.config_files, &compose_name, &config); + } + + Ok(hydrated) + } + + async fn fetch_optional_config( + vault: &VaultService, + deployment_hash: &str, + config_key: &str, + ) -> Result, Error> { + match vault.fetch_app_config(deployment_hash, config_key).await { + Ok(config) => Ok(Some(config)), + Err(VaultError::NotFound(_)) => Ok(None), + Err(error) => Err(JsonResponse::internal_server_error(error.to_string())), + } + } + + fn file_name_from_path(path: &str, fallback: &str) -> String { + path.rsplit('/') + .find(|part| !part.is_empty()) + .unwrap_or(fallback) + .to_string() + } + + fn push_config_file_if_missing( + config_files: &mut Vec, + name: &str, + config: &AppConfig, + ) { + if config_files.iter().any(|file| file.name == name) { + return; + } + + let destination_path = if config.destination_path.is_empty() { + None + } else { + Some(config.destination_path.clone()) + }; + + config_files.push(ConfigFile { + name: name.to_string(), + content: config.content.clone(), + template_path: None, + destination_path, + file_mode: Some(config.file_mode.clone()), + owner: config.owner.clone(), + group: config.group.clone(), + is_ansible: None, + }); + } + + fn parse_env_to_json(content: &str) -> Value { + let mut env_map = serde_json::Map::new(); + for line in content.lines() { + let line = line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + if let Some((key, value)) = line.split_once('=') { + env_map.insert( + key.trim().to_string(), + Value::String(value.trim().to_string()), + ); + } else if let Some((key, value)) = line.split_once(':') { + env_map.insert( + key.trim().to_string(), + Value::String(value.trim().to_string()), + ); + } + } + Value::Object(env_map) + } + + fn parse_config_bundle(content: &str) -> Vec { + if let Ok(json) = serde_json::from_str::>(content) { + json.into_iter() + .filter_map(|file| { + let name = file.get("name")?.as_str()?.to_string(); + let 
content = file.get("content")?.as_str()?.to_string(); + Some(ConfigFile { + name, + content, + template_path: file + .get("template_path") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()), + destination_path: file + .get("destination_path") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()), + file_mode: file + .get("file_mode") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()), + owner: file + .get("owner") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()), + group: file + .get("group") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()), + is_ansible: file.get("is_ansible").and_then(|v| v.as_bool()), + }) + }) + .collect() + } else { + Vec::new() + } + } +} diff --git a/src/project_app/mapping.rs b/src/project_app/mapping.rs index 85897aad..2a6d035d 100644 --- a/src/project_app/mapping.rs +++ b/src/project_app/mapping.rs @@ -365,5 +365,6 @@ pub(crate) fn merge_project_app(existing: ProjectApp, incoming: ProjectApp) -> P vault_sync_version: existing.vault_sync_version, config_hash: existing.config_hash, parent_app_code: incoming.parent_app_code.or(existing.parent_app_code), + deployment_id: incoming.deployment_id.or(existing.deployment_id), } } diff --git a/src/project_app/mod.rs b/src/project_app/mod.rs index 10e1badf..5a4e450a 100644 --- a/src/project_app/mod.rs +++ b/src/project_app/mod.rs @@ -1,3 +1,4 @@ +pub(crate) mod hydration; pub(crate) mod mapping; pub(crate) mod upsert; pub(crate) mod vault; diff --git a/src/project_app/upsert.rs b/src/project_app/upsert.rs index 66cc31f9..8d77aa1f 100644 --- a/src/project_app/upsert.rs +++ b/src/project_app/upsert.rs @@ -27,6 +27,39 @@ pub(crate) async fn upsert_app_config_for_deploy( serde_json::to_string_pretty(parameters).unwrap_or_else(|_| parameters.to_string()) ); + // Resolve the actual deployment record ID from deployment_hash + // (deployment_id parameter is actually project_id in the current code) + let actual_deployment_id = match crate::db::deployment::fetch_by_deployment_hash( + pg_pool, + deployment_hash, + ) + .await + { + Ok(Some(dep)) => { + tracing::info!( + "[UPSERT_APP_CONFIG] Resolved deployment.id={} from hash={}", + dep.id, + deployment_hash + ); + Some(dep.id) + } + Ok(None) => { + tracing::warn!( + "[UPSERT_APP_CONFIG] No deployment found for hash={}, deployment_id will be NULL", + deployment_hash + ); + None + } + Err(e) => { + tracing::warn!( + "[UPSERT_APP_CONFIG] Failed to resolve deployment for hash={}: {}", + deployment_hash, + e + ); + None + } + }; + // Fetch project from DB let project = match crate::db::project::fetch(pg_pool, deployment_id).await { Ok(Some(p)) => { @@ -107,13 +140,20 @@ pub(crate) async fn upsert_app_config_for_deploy( // Log final project_app before upsert tracing::info!( - "[UPSERT_APP_CONFIG] Final project_app - code: {}, name: {}, image: {}, env: {:?}", + "[UPSERT_APP_CONFIG] Final project_app - code: {}, name: {}, image: {}, env: {:?}, deployment_id: {:?}", project_app.code, project_app.name, project_app.image, - project_app.environment + project_app.environment, + project_app.deployment_id ); + // Set deployment_id on the app to scope it to this specific deployment + let mut project_app = project_app; + if project_app.deployment_id.is_none() { + project_app.deployment_id = actual_deployment_id; + } + // Upsert app config and sync to Vault match app_service .upsert(&project_app, &project, deployment_hash) diff --git a/src/project_app/vault.rs b/src/project_app/vault.rs index e99bfeea..290e2f1b 100644 --- a/src/project_app/vault.rs +++ b/src/project_app/vault.rs @@ 
-71,7 +71,7 @@ pub(crate) async fn store_configs_to_vault_from_params( let destination_path = resolve_destination_path( file, - format!("{}/{}/config/{}", config_base_path, app_code, file_name), + format!("{}/{}/{}", config_base_path, app_code, file_name), ); let config = build_app_config(content, content_type, destination_path, file, "0644"); diff --git a/src/routes/agent/report.rs b/src/routes/agent/report.rs index 7c46ca5a..bb7baec2 100644 --- a/src/routes/agent/report.rs +++ b/src/routes/agent/report.rs @@ -163,6 +163,66 @@ pub async fn report_handler( // Remove from queue if still there (shouldn't be, but cleanup) let _ = db::command::remove_from_queue(agent_pool.as_ref(), &payload.command_id).await; + // Cleanup project_app record when remove_app command completes successfully + if command.r#type == "remove_app" && status == models::CommandStatus::Completed { + if let Some(ref params) = command.parameters { + if let Some(app_code) = params.get("app_code").and_then(|v| v.as_str()) { + match db::deployment::fetch_by_deployment_hash( + agent_pool.as_ref(), + &payload.deployment_hash, + ) + .await + { + Ok(Some(deployment)) => { + match db::project_app::delete_by_project_and_code( + agent_pool.as_ref(), + deployment.project_id, + app_code, + ) + .await + { + Ok(true) => { + tracing::info!( + deployment_hash = %payload.deployment_hash, + app_code = %app_code, + "Deleted project_app record after successful remove_app" + ); + } + Ok(false) => { + tracing::debug!( + deployment_hash = %payload.deployment_hash, + app_code = %app_code, + "No project_app record found to delete (may have been removed already)" + ); + } + Err(e) => { + tracing::warn!( + deployment_hash = %payload.deployment_hash, + app_code = %app_code, + error = %e, + "Failed to delete project_app record after remove_app" + ); + } + } + } + Ok(None) => { + tracing::warn!( + deployment_hash = %payload.deployment_hash, + "Deployment not found; cannot clean up project_app" + ); + } + Err(e) => { + tracing::warn!( + deployment_hash = %payload.deployment_hash, + error = %e, + "Failed to fetch deployment for project_app cleanup" + ); + } + } + } + } + } + // Log audit event let audit_log = models::AuditLog::new( Some(agent.id), diff --git a/src/routes/agent/snapshot.rs b/src/routes/agent/snapshot.rs index 5b88b606..e22cd30b 100644 --- a/src/routes/agent/snapshot.rs +++ b/src/routes/agent/snapshot.rs @@ -1,7 +1,7 @@ use crate::db; use crate::forms::status_panel::HealthCommandReport; use crate::helpers::{AgentPgPool, JsonResponse}; -use crate::models::{self, Command, ProjectApp}; +use crate::models::{Command, ProjectApp}; use actix_web::{get, web, Responder, Result}; use serde::{Deserialize, Serialize}; @@ -84,9 +84,9 @@ pub async fn snapshot_handler( .flatten(); tracing::debug!("[SNAPSHOT HANDLER] Deployment : {:?}", deployment); - // Fetch apps for the project + // Fetch apps scoped to this specific deployment (falls back to project-level if no deployment-scoped apps) let apps = if let Some(deployment) = &deployment { - db::project_app::fetch_by_project(agent_pool.get_ref(), deployment.project_id) + db::project_app::fetch_by_deployment(agent_pool.get_ref(), deployment.project_id, deployment.id) .await .unwrap_or_default() } else { diff --git a/src/routes/command/create.rs b/src/routes/command/create.rs index 1774f48d..259c2986 100644 --- a/src/routes/command/create.rs +++ b/src/routes/command/create.rs @@ -1,5 +1,5 @@ use crate::configuration::Settings; -use crate::db::{self, project}; +use crate::db; use crate::forms::status_panel; 
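Note: `db::project_app::fetch_by_deployment`, referenced by the snapshot handler above, is defined outside this diff. A minimal sketch of the scoped-first, legacy-fallback lookup it is described as performing (hypothetical code, assuming `ProjectApp` derives `sqlx::FromRow`):

```rust
use crate::models::ProjectApp;
use sqlx::PgPool;

// Sketch only: prefer rows scoped to this deployment, then fall back to
// legacy rows created before deployment scoping (deployment_id IS NULL).
pub async fn fetch_by_deployment(
    pool: &PgPool,
    project_id: i32,
    deployment_id: i32,
) -> Result<Vec<ProjectApp>, sqlx::Error> {
    let scoped = sqlx::query_as::<_, ProjectApp>(
        "SELECT * FROM project_app WHERE project_id = $1 AND deployment_id = $2",
    )
    .bind(project_id)
    .bind(deployment_id)
    .fetch_all(pool)
    .await?;

    if !scoped.is_empty() {
        return Ok(scoped);
    }

    sqlx::query_as::<_, ProjectApp>(
        "SELECT * FROM project_app WHERE project_id = $1 AND deployment_id IS NULL",
    )
    .bind(project_id)
    .fetch_all(pool)
    .await
}
```

The NULL fallback matches the note on `ProjectApp.deployment_id` ("NULL for legacy apps created before deployment scoping"), so pre-scoping projects keep reporting their apps.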
use crate::helpers::project::builder::parse_compose_services; use crate::helpers::JsonResponse; @@ -264,6 +264,7 @@ pub async fn create_handler( project_id, app_code, compose_content, + &req.deployment_hash, ) .await; } @@ -556,7 +557,19 @@ pub async fn discover_and_register_child_services( project_id: i32, parent_app_code: &str, compose_content: &str, + deployment_hash: &str, ) -> usize { + // Resolve actual deployment ID from hash for scoping apps per deployment + let actual_deployment_id = match crate::db::deployment::fetch_by_deployment_hash( + pg_pool, + deployment_hash, + ) + .await + { + Ok(Some(dep)) => Some(dep.id), + _ => None, + }; + // Parse the compose file to extract services let services = match parse_compose_services(compose_content) { Ok(svcs) => svcs, @@ -630,6 +643,9 @@ pub async fn discover_and_register_child_services( // Set parent reference new_app.parent_app_code = Some(parent_app_code.to_string()); + // Scope to this specific deployment + new_app.deployment_id = actual_deployment_id; + // Convert environment to JSON object if !svc.environment.is_empty() { let mut env_map = serde_json::Map::new(); diff --git a/src/routes/marketplace/admin.rs b/src/routes/marketplace/admin.rs index 14dcbe29..9d6cf20c 100644 --- a/src/routes/marketplace/admin.rs +++ b/src/routes/marketplace/admin.rs @@ -1,6 +1,7 @@ use crate::connectors::user_service::UserServiceConnector; use crate::connectors::{MarketplaceWebhookSender, WebhookSenderConfig}; use crate::db; +use crate::helpers::security_validator; use crate::helpers::JsonResponse; use crate::models; use actix_web::{get, post, web, Responder, Result}; @@ -23,6 +24,42 @@ pub async fn list_submitted_handler( .map(|templates| JsonResponse::build().set_list(templates).ok("OK")) } +#[tracing::instrument(name = "Get template detail (admin)")] +#[get("/{id}")] +pub async fn detail_handler( + _admin: web::ReqData>, + path: web::Path<(String,)>, + pg_pool: web::Data, +) -> Result>> { + let id = uuid::Uuid::parse_str(&path.into_inner().0) + .map_err(|_| actix_web::error::ErrorBadRequest("Invalid UUID"))?; + + let template = db::marketplace::get_by_id(pg_pool.get_ref(), id) + .await + .map_err(|err| JsonResponse::::build().internal_server_error(err))? 
+ .ok_or_else(|| { + JsonResponse::::build().not_found("Template not found") + })?; + + let versions = db::marketplace::list_versions_by_template(pg_pool.get_ref(), id) + .await + .map_err(|err| JsonResponse::::build().internal_server_error(err))?; + + let reviews = db::marketplace::list_reviews_by_template(pg_pool.get_ref(), id) + .await + .map_err(|err| JsonResponse::::build().internal_server_error(err))?; + + let detail = serde_json::json!({ + "template": template, + "versions": versions, + "reviews": reviews, + }); + + Ok(JsonResponse::::build() + .set_item(detail) + .ok("OK")) +} + #[derive(serde::Deserialize, Debug)] pub struct AdminDecisionRequest { pub decision: String, // approved|rejected|needs_changes @@ -153,6 +190,128 @@ pub async fn reject_handler( Ok(JsonResponse::::build().ok("Rejected")) } + +#[derive(serde::Deserialize, Debug)] +pub struct UnapproveRequest { + pub reason: Option, +} + +#[tracing::instrument(name = "Unapprove template (admin)")] +#[post("/{id}/unapprove")] +pub async fn unapprove_handler( + admin: web::ReqData>, + path: web::Path<(String,)>, + pg_pool: web::Data, + body: web::Json, +) -> Result>> { + let id = uuid::Uuid::parse_str(&path.into_inner().0) + .map_err(|_| actix_web::error::ErrorBadRequest("Invalid UUID"))?; + let req = body.into_inner(); + + let updated = db::marketplace::admin_unapprove( + pg_pool.get_ref(), + &id, + &admin.id, + req.reason.as_deref(), + ) + .await + .map_err(|err| JsonResponse::::build().internal_server_error(err))?; + + if !updated { + return Err(JsonResponse::::build() + .bad_request("Template is not approved or not found")); + } + + // Send webhook to remove from marketplace (same as rejection - deactivates product) + let template_id = id.to_string(); + tokio::spawn(async move { + match WebhookSenderConfig::from_env() { + Ok(config) => { + let sender = MarketplaceWebhookSender::new(config); + let span = + tracing::info_span!("send_unapproval_webhook", template_id = %template_id); + + if let Err(e) = sender + .send_template_rejected(&template_id) + .instrument(span) + .await + { + tracing::warn!("Failed to send template unapproval webhook: {:?}", e); + } + } + Err(e) => { + tracing::warn!("Webhook sender config not available: {}", e); + } + } + }); + + Ok(JsonResponse::::build().ok("Template unapproved and hidden from marketplace")) +} + +#[tracing::instrument(name = "Security scan template (admin)")] +#[post("/{id}/security-scan")] +pub async fn security_scan_handler( + admin: web::ReqData>, + path: web::Path<(String,)>, + pg_pool: web::Data, +) -> Result>> { + let id = uuid::Uuid::parse_str(&path.into_inner().0) + .map_err(|_| actix_web::error::ErrorBadRequest("Invalid UUID"))?; + + // Fetch template + let template = db::marketplace::get_by_id(pg_pool.get_ref(), id) + .await + .map_err(|err| JsonResponse::::build().internal_server_error(err))? 
+ .ok_or_else(|| { + JsonResponse::::build().not_found("Template not found") + })?; + + // Fetch versions to get latest stack_definition + let versions = db::marketplace::list_versions_by_template(pg_pool.get_ref(), id) + .await + .map_err(|err| JsonResponse::::build().internal_server_error(err))?; + + let latest = versions + .iter() + .find(|v| v.is_latest == Some(true)) + .or_else(|| versions.first()) + .ok_or_else(|| { + JsonResponse::::build() + .bad_request("No versions found for this template") + })?; + + // Run automated security validation + let report = security_validator::validate_stack_security(&latest.stack_definition); + + // Save scan result as a review record + let review = db::marketplace::save_security_scan( + pg_pool.get_ref(), + &id, + &admin.id, + report.to_checklist_json(), + ) + .await + .map_err(|err| JsonResponse::::build().internal_server_error(err))?; + + let result = serde_json::json!({ + "template_id": template.id, + "template_name": template.name, + "version": latest.version, + "review_id": review.id, + "overall_passed": report.overall_passed, + "risk_score": report.risk_score, + "no_secrets": report.no_secrets, + "no_hardcoded_creds": report.no_hardcoded_creds, + "valid_docker_syntax": report.valid_docker_syntax, + "no_malicious_code": report.no_malicious_code, + "recommendations": report.recommendations, + }); + + Ok(JsonResponse::::build() + .set_item(result) + .ok("Security scan completed")) +} + #[tracing::instrument(name = "List available plans from User Service", skip(user_service))] #[get("/plans")] pub async fn list_plans_handler( diff --git a/src/routes/marketplace/creator.rs b/src/routes/marketplace/creator.rs index 35618c19..3fcfad23 100644 --- a/src/routes/marketplace/creator.rs +++ b/src/routes/marketplace/creator.rs @@ -18,6 +18,12 @@ pub struct CreateTemplateRequest { pub version: Option, pub stack_definition: Option, pub definition_format: Option, + /// Pricing: "free", "one_time", or "subscription" + pub plan_type: Option, + /// Price amount (e.g. 9.99). 
Ignored when plan_type is "free"
+    pub price: Option<f64>,
+    /// ISO 4217 currency code, default "USD"
+    pub currency: Option<String>,
 }
 
 #[tracing::instrument(name = "Create draft template")]
@@ -34,6 +40,11 @@ pub async fn create_handler(
 
     let creator_name = format!("{} {}", user.first_name, user.last_name);
 
+    // Normalize pricing: plan_type "free" forces price to 0
+    let billing_cycle = req.plan_type.unwrap_or_else(|| "free".to_string());
+    let price = if billing_cycle == "free" { 0.0 } else { req.price.unwrap_or(0.0) };
+    let currency = req.currency.unwrap_or_else(|| "USD".to_string());
+
     // Check if template with this slug already exists for this user
     let existing = db::marketplace::get_by_slug_and_user(pg_pool.get_ref(), &req.slug, &user.id)
         .await
@@ -51,6 +62,9 @@
         req.category_code.as_deref(),
         Some(tags.clone()),
         Some(tech_stack.clone()),
+        Some(price),
+        Some(billing_cycle.as_str()),
+        Some(currency.as_str()),
     )
     .await
     .map_err(|err| JsonResponse::::build().internal_server_error(err))?;
@@ -83,6 +97,9 @@
             req.category_code.as_deref(),
             tags,
             tech_stack,
+            price,
+            &billing_cycle,
+            &currency,
         )
         .await
         .map_err(|err| {
@@ -121,6 +138,9 @@ pub struct UpdateTemplateRequest {
     pub category_code: Option,
     pub tags: Option,
     pub tech_stack: Option,
+    pub plan_type: Option<String>,
+    pub price: Option<f64>,
+    pub currency: Option<String>,
 }
 
 #[tracing::instrument(name = "Update template metadata")]
@@ -158,6 +178,9 @@ pub async fn update_handler(
         req.category_code.as_deref(),
         req.tags,
         req.tech_stack,
+        req.price,
+        req.plan_type.as_deref(),
+        req.currency.as_deref(),
     )
     .await
     .map_err(|err| JsonResponse::::build().bad_request(err))?;
@@ -203,6 +226,62 @@ pub async fn submit_handler(
     }
 }
 
+#[derive(Debug, serde::Deserialize)]
+pub struct ResubmitRequest {
+    pub version: String,
+    pub stack_definition: serde_json::Value,
+    pub definition_format: Option<String>,
+    pub changelog: Option<String>,
+}
+
+#[tracing::instrument(name = "Resubmit template with new version")]
+#[post("/{id}/resubmit")]
+pub async fn resubmit_handler(
+    user: web::ReqData<Arc<models::User>>,
+    path: web::Path<(String,)>,
+    pg_pool: web::Data<PgPool>,
+    body: web::Json<ResubmitRequest>,
+) -> Result>> {
+    let id = uuid::Uuid::parse_str(&path.into_inner().0)
+        .map_err(|_| actix_web::error::ErrorBadRequest("Invalid UUID"))?;
+
+    // Ownership check
+    let owner_id: String = sqlx::query_scalar!(
+        r#"SELECT creator_user_id FROM stack_template WHERE id = $1"#,
+        id
+    )
+    .fetch_one(pg_pool.get_ref())
+    .await
+    .map_err(|_| JsonResponse::::build().not_found("Not Found"))?;
+
+    if owner_id != user.id {
+        return Err(JsonResponse::::build().forbidden("Forbidden"));
+    }
+
+    let req = body.into_inner();
+
+    let version = db::marketplace::resubmit_with_new_version(
+        pg_pool.get_ref(),
+        &id,
+        &req.version,
+        req.stack_definition,
+        req.definition_format.as_deref(),
+        req.changelog.as_deref(),
+    )
+    .await
+    .map_err(|err| JsonResponse::::build().bad_request(err))?;
+
+    let result = serde_json::json!({
+        "template_id": id,
+        "version": version,
+        "status": "submitted"
+    });
+
+    Ok(JsonResponse::::build()
+        .set_item(result)
+        .ok("Resubmitted for review"))
+}
+
 #[tracing::instrument(name = "List my templates")]
 #[get("/mine")]
 pub async fn mine_handler(
diff --git a/src/routes/marketplace/mod.rs b/src/routes/marketplace/mod.rs
index aa6afb93..d411d20c 100644
--- a/src/routes/marketplace/mod.rs
+++ b/src/routes/marketplace/mod.rs
@@ -4,6 +4,5 @@ pub mod creator;
 pub mod public;
 
 pub use admin::*;
-pub use categories::*;
 pub use creator::*;
 pub use public::*;
diff --git
a/src/routes/project/app.rs b/src/routes/project/app.rs index a8925b81..4207995a 100644 --- a/src/routes/project/app.rs +++ b/src/routes/project/app.rs @@ -14,14 +14,27 @@ use crate::db; use crate::helpers::JsonResponse; -use crate::models; +use crate::models::{self, Project}; +use crate::services::{ProjectAppService}; use actix_web::{delete, get, post, put, web, Responder, Result}; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; use sqlx::PgPool; use std::sync::Arc; -use crate::services::ProjectAppService; +use crate::project_app::hydration::{hydrate_project_app, hydrate_single_app, HydratedProjectApp}; + +async fn hydrate_apps_with_metadata( + pool: &PgPool, + project: &Project, + apps: Vec, +) -> Result, actix_web::Error> { + let mut hydrated = Vec::with_capacity(apps.len()); + for app in apps { + hydrated.push(hydrate_project_app(pool, project, app).await?); + } + Ok(hydrated) +} /// Response for app configuration #[derive(Debug, Serialize)] @@ -145,7 +158,10 @@ pub async fn list_apps( .await .map_err(|e| JsonResponse::internal_server_error(e))?; - Ok(JsonResponse::build().set_list(apps).ok("OK")) + // Hydrate additional config metadata via helper + let hydrated = hydrate_apps_with_metadata(pg_pool.get_ref(), &project, apps).await?; + + Ok(JsonResponse::build().set_list(hydrated).ok("OK")) } /// Create or update an app in a project @@ -256,7 +272,9 @@ pub async fn get_app( .map_err(|e| JsonResponse::internal_server_error(e))? .ok_or_else(|| JsonResponse::not_found("App not found"))?; - Ok(JsonResponse::build().set_item(Some(app)).ok("OK")) + let hydrated = hydrate_single_app(pg_pool.get_ref(), &project, app).await?; + + Ok(JsonResponse::build().set_item(Some(hydrated)).ok("OK")) } /// Get app configuration (env vars, ports, domain, etc.) diff --git a/src/routes/project/deploy.rs b/src/routes/project/deploy.rs index 1b134e77..8beeb22e 100644 --- a/src/routes/project/deploy.rs +++ b/src/routes/project/deploy.rs @@ -249,50 +249,57 @@ pub async fn saved_item( } }; - let server = match db::server::fetch_by_project(pg_pool.get_ref(), dc.project.id.clone()).await - { - Ok(server) => { - // currently we support only one type of servers - //@todo multiple server types support - match server.into_iter().nth(0) { - Some(mut server) => { - // new updates - server.disk_type = form.server.disk_type.clone(); - server.region = form.server.region.clone(); - server.server = form.server.server.clone(); - server.zone = form.server.zone.clone(); - server.os = form.server.os.clone(); - server.user_id = user.id.clone(); - server.project_id = id; - server - } - None => { - // Create new server - // form.update_with(server.into()); - let mut server: models::Server = (&form.server).into(); - server.user_id = user.id.clone(); - server.project_id = id; - db::server::insert(pg_pool.get_ref(), server) - .await - .map(|server| server) - .map_err(|_| { - JsonResponse::::build() - .internal_server_error("Internal Server Error") - })? - } - } + // Handle server: if server_id provided, update existing; otherwise create new + let server = if let Some(server_id) = form.server.server_id { + // Update existing server + let existing = db::server::fetch(pg_pool.get_ref(), server_id) + .await + .map_err(|_| { + JsonResponse::::build().internal_server_error("Failed to fetch server") + })? 
+ .ok_or_else(|| { + JsonResponse::::build().not_found("Server not found") + })?; + + // Verify ownership + if existing.user_id != user.id { + return Err(JsonResponse::::build().not_found("Server not found")); } - Err(_e) => { - return Err(JsonResponse::::build().not_found("No servers configured")); + + let mut server = existing; + server.disk_type = form.server.disk_type.clone(); + server.region = form.server.region.clone(); + server.server = form.server.server.clone(); + server.zone = form.server.zone.clone(); + server.os = form.server.os.clone(); + server.project_id = id; + server.srv_ip = form.server.srv_ip.clone(); + server.ssh_user = form.server.ssh_user.clone(); + server.ssh_port = form.server.ssh_port.or(server.ssh_port); + server.name = form.server.name.clone().or(server.name); + if form.server.connection_mode.is_some() { + server.connection_mode = form.server.connection_mode.clone().unwrap(); } - }; + server.cloud_id = Some(cloud_id); - let server = db::server::update(pg_pool.get_ref(), server) - .await - .map(|server| server) - .map_err(|_| { - JsonResponse::::build().internal_server_error("Internal Server Error") - })?; + db::server::update(pg_pool.get_ref(), server) + .await + .map_err(|_| { + JsonResponse::::build().internal_server_error("Failed to update server") + })? + } else { + // Create new server + let mut server: models::Server = (&form.server).into(); + server.user_id = user.id.clone(); + server.project_id = id; + server.cloud_id = Some(cloud_id); + + db::server::insert(pg_pool.get_ref(), server) + .await + .map_err(|_| { + JsonResponse::::build().internal_server_error("Failed to create server") + })? + }; // Building Payload for the 3-d party service through RabbitMQ // let mut payload = forms::project::Payload::default(); diff --git a/src/routes/project/discover.rs b/src/routes/project/discover.rs index ce9c80b1..fe6b6e63 100644 --- a/src/routes/project/discover.rs +++ b/src/routes/project/discover.rs @@ -8,7 +8,7 @@ use crate::helpers::JsonResponse; use crate::models::{self, ProjectApp}; use actix_web::{get, post, web, Responder, Result}; use serde::{Deserialize, Serialize}; -use serde_json::{json, Value}; +use serde_json::json; use sqlx::PgPool; use std::sync::Arc; @@ -72,7 +72,7 @@ pub struct ContainerImport { } /// Discover running containers for a deployment -/// +/// /// This endpoint compares running Docker containers (from recent health checks) /// with registered project_app records to identify: /// - Registered apps with running containers (synced) @@ -87,17 +87,17 @@ pub async fn discover_containers( pg_pool: web::Data, ) -> Result { let project_id = path.into_inner(); - + // Verify project ownership let project = db::project::fetch(pg_pool.get_ref(), project_id) .await .map_err(|e| JsonResponse::internal_server_error(e))? .ok_or_else(|| JsonResponse::not_found("Project not found"))?; - + if project.user_id != user.id { return Err(JsonResponse::not_found("Project not found")); } - + // Get deployment_hash from query or find it from project let deployment_hash = match &query.deployment_hash { Some(hash) => hash.clone(), @@ -106,31 +106,33 @@ pub async fn discover_containers( let deployment = db::deployment::fetch_by_project_id(pg_pool.get_ref(), project_id) .await .map_err(|e| JsonResponse::internal_server_error(e))?; - - deployment - .map(|d| d.deployment_hash) - .ok_or_else(|| JsonResponse::not_found("No deployment found for project. Please provide deployment_hash"))? 
+ + deployment.map(|d| d.deployment_hash).ok_or_else(|| { + JsonResponse::not_found( + "No deployment found for project. Please provide deployment_hash", + ) + })? } }; - + // Fetch all apps registered in this project let registered_apps = db::project_app::fetch_by_project(pg_pool.get_ref(), project_id) .await .map_err(|e| JsonResponse::internal_server_error(e))?; - + // Fetch recent list_containers commands to get ALL running containers let container_commands = db::command::fetch_recent_by_deployment( pg_pool.get_ref(), &deployment_hash, - 50, // Last 50 commands to find list_containers results + 50, // Last 50 commands to find list_containers results false, // Include results ) .await .unwrap_or_default(); - + // Extract running containers from list_containers or health commands let mut running_containers: Vec = Vec::new(); - + // First, try to find a list_containers result (has ALL containers) for cmd in container_commands.iter() { if cmd.r#type == "list_containers" && cmd.status == "completed" { @@ -138,13 +140,25 @@ pub async fn discover_containers( // Parse list_containers result which contains array of all containers if let Some(containers_arr) = result.get("containers").and_then(|c| c.as_array()) { for c in containers_arr { - let name = c.get("name").and_then(|n| n.as_str()).unwrap_or("").to_string(); + let name = c + .get("name") + .and_then(|n| n.as_str()) + .unwrap_or("") + .to_string(); if name.is_empty() { continue; } - let status = c.get("status").and_then(|s| s.as_str()).unwrap_or("unknown").to_string(); - let image = c.get("image").and_then(|i| i.as_str()).unwrap_or("").to_string(); - + let status = c + .get("status") + .and_then(|s| s.as_str()) + .unwrap_or("unknown") + .to_string(); + let image = c + .get("image") + .and_then(|i| i.as_str()) + .unwrap_or("") + .to_string(); + if !running_containers.iter().any(|rc| rc.name == name) { running_containers.push(ContainerInfo { name: name.clone(), @@ -162,16 +176,19 @@ pub async fn discover_containers( } } } - + // Fallback: If no list_containers found, try health check results if running_containers.is_empty() { for cmd in container_commands.iter() { if cmd.r#type == "health" && cmd.status == "completed" { if let Some(result) = &cmd.result { // Try to extract from system_containers array first - if let Some(system_arr) = result.get("system_containers").and_then(|c| c.as_array()) { + if let Some(system_arr) = + result.get("system_containers").and_then(|c| c.as_array()) + { for c in system_arr { - let name = c.get("container_name") + let name = c + .get("container_name") .or_else(|| c.get("app_code")) .and_then(|n| n.as_str()) .unwrap_or("") @@ -179,30 +196,35 @@ pub async fn discover_containers( if name.is_empty() { continue; } - let status = c.get("container_state") + let status = c + .get("container_state") .or_else(|| c.get("status")) .and_then(|s| s.as_str()) .unwrap_or("unknown") .to_string(); - + if !running_containers.iter().any(|rc| rc.name == name) { running_containers.push(ContainerInfo { name: name.clone(), image: String::new(), status, - app_code: c.get("app_code").and_then(|a| a.as_str()).map(|s| s.to_string()), + app_code: c + .get("app_code") + .and_then(|a| a.as_str()) + .map(|s| s.to_string()), }); } } } - + // Also try app_code from single-app health checks if let Some(app_code) = result.get("app_code").and_then(|a| a.as_str()) { - let status = result.get("container_state") + let status = result + .get("container_state") .and_then(|s| s.as_str()) .unwrap_or("unknown") .to_string(); - + if 
!running_containers.iter().any(|c| c.name == app_code) {
                         running_containers.push(ContainerInfo {
                             name: app_code.to_string(),
@@ -216,7 +238,7 @@
             }
         }
     }
-
+
     tracing::info!(
         project_id = project_id,
         deployment_hash = %deployment_hash,
@@ -224,22 +246,21 @@
         running_count = running_containers.len(),
         "Discovered containers"
     );
-
+
     // Classify containers
     let mut registered = Vec::new();
     let mut unregistered = Vec::new();
     let mut missing_containers = Vec::new();
-
+
     // Find registered apps with running containers
     for app in &registered_apps {
-        let matching_container = running_containers.iter()
-            .find(|c| {
-                // Try to match by app_code first
-                c.app_code.as_ref() == Some(&app.code) ||
+        let matching_container = running_containers.iter().find(|c| {
+            // Try to match by app_code first
+            c.app_code.as_ref() == Some(&app.code) ||
                 // Or by container name matching app code
                 container_matches_app(&c.name, &app.code)
-            });
-
+        });
+
         if let Some(container) = matching_container {
             registered.push(RegisteredContainerInfo {
                 app_code: app.code.clone(),
@@ -256,18 +277,18 @@
             });
         }
     }
-
+
     // Find running containers not registered
     for container in &running_containers {
-        let is_registered = registered_apps.iter()
-            .any(|app| {
-                app.code == container.app_code.clone().unwrap_or_default() ||
-                container_matches_app(&container.name, &app.code)
-            });
-
+        let is_registered = registered_apps.iter().any(|app| {
+            app.code == container.app_code.clone().unwrap_or_default()
+                || container_matches_app(&container.name, &app.code)
+        });
+
         if !is_registered {
-            let (suggested_code, suggested_name) = suggest_app_info(&container.name, &container.image);
-
+            let (suggested_code, suggested_name) =
+                suggest_app_info(&container.name, &container.image);
+
             unregistered.push(DiscoveredContainer {
                 container_name: container.name.clone(),
                 image: container.image.clone(),
@@ -277,13 +298,13 @@
             });
         }
     }
-
+
     let response = DiscoverResponse {
         registered,
         unregistered,
         missing_containers,
     };
-
+
     tracing::info!(
         project_id = project_id,
         registered = response.registered.len(),
@@ -291,7 +312,7 @@
         missing = response.missing_containers.len(),
         "Container discovery complete"
    );
-
+
    Ok(JsonResponse::build()
        .set_item(response)
        .ok("Containers discovered"))
@@ -307,28 +328,31 @@ pub async fn import_containers(
    pg_pool: web::Data<PgPool>,
) -> Result<impl Responder> {
    let project_id = path.into_inner();
-
+
    // Verify project ownership
    let project = db::project::fetch(pg_pool.get_ref(), project_id)
        .await
        .map_err(|e| JsonResponse::internal_server_error(e))?
.ok_or_else(|| JsonResponse::not_found("Project not found"))?; - + if project.user_id != user.id { return Err(JsonResponse::not_found("Project not found")); } - + let mut imported = Vec::new(); let mut errors = Vec::new(); - + for container in &body.containers { // Check if app_code already exists let existing = db::project_app::fetch_by_project_and_code( pg_pool.get_ref(), project_id, - &container.app_code - ).await.ok().flatten(); - + &container.app_code, + ) + .await + .ok() + .flatten(); + if existing.is_some() { errors.push(format!( "App code '{}' already exists in project", @@ -336,7 +360,7 @@ pub async fn import_containers( )); continue; } - + // Create new project_app entry let app = ProjectApp { id: 0, // Will be set by database @@ -368,8 +392,9 @@ pub async fn import_containers( vault_sync_version: None, config_hash: None, parent_app_code: None, + deployment_id: None, }; - + match db::project_app::insert(pg_pool.get_ref(), &app).await { Ok(created) => { imported.push(json!({ @@ -377,7 +402,7 @@ pub async fn import_containers( "name": created.name, "container_name": container.container_name, })); - + tracing::info!( user_id = %user.id, project_id = project_id, @@ -392,7 +417,7 @@ pub async fn import_containers( } } } - + Ok(JsonResponse::build() .set_item(Some(json!({ "imported": imported, @@ -426,12 +451,12 @@ fn container_matches_app(container_name: &str, app_code: &str) -> bool { if container_name == app_code { return true; } - + // Container ends with app_code (e.g., "statuspanel_agent" matches "agent") if container_name.ends_with(app_code) { return true; } - + // Container is {app_code}_{number} or {app_code}-{number} if container_name.starts_with(app_code) { let suffix = &container_name[app_code.len()..]; @@ -443,13 +468,13 @@ fn container_matches_app(container_name: &str, app_code: &str) -> bool { } } } - + // Container is {project}-{app_code}-{number} let parts: Vec<&str> = container_name.split('-').collect(); if parts.len() >= 2 && parts[parts.len() - 2] == app_code { return true; } - + false } @@ -461,7 +486,7 @@ fn suggest_app_info(container_name: &str, image: &str) -> (String, String) { let name = capitalize(&code); return (code, name); } - + // Try to extract from project-service-replica pattern let parts: Vec<&str> = container_name.split('-').collect(); if parts.len() >= 2 { @@ -470,14 +495,14 @@ fn suggest_app_info(container_name: &str, image: &str) -> (String, String) { return (service.to_string(), capitalize(service)); } } - + // Extract from image name (last part before tag) if let Some(img_name) = image.split('/').last() { if let Some(name_without_tag) = img_name.split(':').next() { return (name_without_tag.to_string(), capitalize(name_without_tag)); } } - + // Fallback: use container name (container_name.to_string(), capitalize(container_name)) } diff --git a/src/routes/server/get.rs b/src/routes/server/get.rs index ea36b784..9d3ef9dd 100644 --- a/src/routes/server/get.rs +++ b/src/routes/server/get.rs @@ -38,10 +38,10 @@ pub async fn list( user: web::ReqData>, pg_pool: web::Data, ) -> Result { - db::server::fetch_by_user(pg_pool.get_ref(), user.id.as_ref()) + db::server::fetch_by_user_with_provider(pg_pool.get_ref(), user.id.as_ref()) .await - .map(|server| JsonResponse::build().set_list(server).ok("OK")) - .map_err(|_err| JsonResponse::::build().internal_server_error("")) + .map(|servers| JsonResponse::build().set_list(servers).ok("OK")) + .map_err(|_err| JsonResponse::::build().internal_server_error("")) } #[tracing::instrument(name = "Get servers by 
diff --git a/src/routes/server/get.rs index ea36b784..9d3ef9dd 100644 --- a/src/routes/server/get.rs +++ b/src/routes/server/get.rs @@ -38,10 +38,10 @@ pub async fn list( user: web::ReqData>, pg_pool: web::Data<PgPool>, ) -> Result { - db::server::fetch_by_user(pg_pool.get_ref(), user.id.as_ref()) + db::server::fetch_by_user_with_provider(pg_pool.get_ref(), user.id.as_ref()) .await - .map(|server| JsonResponse::build().set_list(server).ok("OK")) - .map_err(|_err| JsonResponse::::build().internal_server_error("")) + .map(|servers| JsonResponse::build().set_list(servers).ok("OK")) + .map_err(|_err| JsonResponse::::build().internal_server_error("")) } #[tracing::instrument(name = "Get servers by project.")] @@ -54,7 +54,7 @@ pub async fn list_by_project( let project_id = path.0; // Verify user owns the project - let project = db::project::fetch(pg_pool.get_ref(), project_id) + let _project = db::project::fetch(pg_pool.get_ref(), project_id) .await .map_err(|_err| JsonResponse::::build().internal_server_error("")) .and_then(|p| match p { diff --git a/src/routes/server/ssh_key.rs index 66f23515..5501dc0c 100644 --- a/src/routes/server/ssh_key.rs +++ b/src/routes/server/ssh_key.rs @@ -28,6 +28,16 @@ pub struct GenerateKeyResponse { pub message: String, } +/// Response for SSH key generation (with optional private key if Vault fails) +#[derive(Debug, Clone, Default, Serialize)] +pub struct GenerateKeyResponseWithPrivate { + pub public_key: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub private_key: Option<String>, + pub fingerprint: Option<String>, + pub message: String, +} + /// Helper to verify server ownership async fn verify_server_ownership( pg_pool: &PgPool, @@ -85,34 +95,33 @@ pub async fn generate_key( .internal_server_error("Failed to generate SSH key") })?; - // Store in Vault - let vault_path = vault_client + // Try to store in Vault, but don't fail if it doesn't work + let vault_result = vault_client .get_ref() .store_ssh_key(&user.id, server_id, &public_key, &private_key) - .await - .map_err(|e| { - tracing::error!("Failed to store SSH key in Vault: {}", e); - let _ = futures::executor::block_on(db::server::update_ssh_key_status( - pg_pool.get_ref(), - server_id, - None, - "failed", - )); - JsonResponse::::build() - .internal_server_error("Failed to store SSH key") - })?; + .await; + + let (vault_path, status, message, include_private_key) = match vault_result { + Ok(path) => { + tracing::info!("SSH key stored in Vault successfully"); + (Some(path), "active", "SSH key generated and stored in Vault successfully. Copy the public key to your server's authorized_keys.".to_string(), false) + } + Err(e) => { + tracing::warn!("Failed to store SSH key in Vault (continuing without Vault): {}", e); + (None, "active", format!("SSH key generated successfully, but could not be stored in Vault ({}). Please save the private key shown below - it will not be shown again!", e), true) + } + }; // Update server with vault path and active status - db::server::update_ssh_key_status(pg_pool.get_ref(), server_id, Some(vault_path), "active") + db::server::update_ssh_key_status(pg_pool.get_ref(), server_id, vault_path, status) .await .map_err(|e| JsonResponse::::build().internal_server_error(&e))?; - let response = GenerateKeyResponse { - public_key, + let response = GenerateKeyResponseWithPrivate { + public_key: public_key.clone(), + private_key: if include_private_key { Some(private_key) } else { None }, fingerprint: None, // TODO: Calculate fingerprint - message: - "SSH key generated successfully. Copy the public key to your server's authorized_keys." - .to_string(), + message, }; Ok(JsonResponse::build()
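GenerateKeyResponseWithPrivate only serializes private_key on the Vault-failure path, thanks to skip_serializing_if. A standalone sketch of that behavior (struct and strings are illustrative, assuming serde and serde_json as dependencies):

use serde::Serialize;

#[derive(Serialize)]
struct KeyResponse {
    public_key: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    private_key: Option<String>,
    message: String,
}

fn main() {
    // Vault stored the key: the private key is never echoed back.
    let stored = KeyResponse {
        public_key: "ssh-ed25519 AAAA...".into(),
        private_key: None,
        message: "stored in Vault".into(),
    };
    // Vault failed: the key is returned once so the user can save it.
    let fallback = KeyResponse {
        public_key: "ssh-ed25519 AAAA...".into(),
        private_key: Some("-----BEGIN OPENSSH PRIVATE KEY-----...".into()),
        message: "Vault unavailable; save this key now".into(),
    };
    // The field is omitted entirely (not null) on the success path.
    assert!(!serde_json::to_string(&stored).unwrap().contains("private_key"));
    assert!(serde_json::to_string(&fallback).unwrap().contains("private_key"));
}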
@@ -220,6 +229,176 @@ pub async fn get_public_key( Ok(JsonResponse::build().set_item(Some(response)).ok("OK")) } +/// Response for SSH validation with full system check +#[derive(Debug, Clone, Default, Serialize)] +pub struct ValidateResponse { + pub valid: bool, + pub server_id: i32, + pub srv_ip: Option<String>, + pub message: String, + /// SSH connection was successful + pub connected: bool, + /// SSH authentication was successful + pub authenticated: bool, + /// Username from whoami + #[serde(skip_serializing_if = "Option::is_none")] + pub username: Option<String>, + /// Total disk space in GB + #[serde(skip_serializing_if = "Option::is_none")] + pub disk_total_gb: Option<f64>, + /// Available disk space in GB + #[serde(skip_serializing_if = "Option::is_none")] + pub disk_available_gb: Option<f64>, + /// Disk usage percentage + #[serde(skip_serializing_if = "Option::is_none")] + pub disk_usage_percent: Option<f64>, + /// Docker is installed + pub docker_installed: bool, + /// Docker version string + #[serde(skip_serializing_if = "Option::is_none")] + pub docker_version: Option<String>, + /// OS name (from /etc/os-release) + #[serde(skip_serializing_if = "Option::is_none")] + pub os_name: Option<String>, + /// OS version + #[serde(skip_serializing_if = "Option::is_none")] + pub os_version: Option<String>, + /// Total memory in MB + #[serde(skip_serializing_if = "Option::is_none")] + pub memory_total_mb: Option<u64>, + /// Available memory in MB + #[serde(skip_serializing_if = "Option::is_none")] + pub memory_available_mb: Option<u64>, +} + +/// Validate SSH connection for a server +/// POST /server/{id}/ssh-key/validate +/// +/// This endpoint: +/// 1. Verifies the server exists and belongs to the user +/// 2. Checks the SSH key is active and retrieves it from Vault +/// 3. Connects to the server via SSH and authenticates +/// 4. Runs system diagnostic commands (whoami, df, docker, os-release, free) +/// 5. Returns comprehensive system information
+#[tracing::instrument(name = "Validate SSH key for server.")] +#[post("/{id}/ssh-key/validate")] +pub async fn validate_key( + path: web::Path<(i32,)>, + user: web::ReqData>, + pg_pool: web::Data<PgPool>, + vault_client: web::Data, +) -> Result { + use crate::helpers::ssh_client; + use std::time::Duration; + + let server_id = path.0; + let server = verify_server_ownership(pg_pool.get_ref(), server_id, &user.id).await?; + + // Check if server has an active key + if server.key_status != "active" { + let response = ValidateResponse { + valid: false, + server_id, + srv_ip: server.srv_ip.clone(), + message: format!("SSH key status is '{}', not active", server.key_status), + ..Default::default() + }; + return Ok(JsonResponse::build() + .set_item(Some(response)) + .ok("Validation failed")); + } + + // Verify we have the server IP + let srv_ip = match &server.srv_ip { + Some(ip) if !ip.is_empty() => ip.clone(), + _ => { + let response = ValidateResponse { + valid: false, + server_id, + srv_ip: server.srv_ip.clone(), + message: "Server IP address not configured".to_string(), + ..Default::default() + }; + return Ok(JsonResponse::build() + .set_item(Some(response)) + .ok("Validation failed")); + } + }; + + // Fetch private key from Vault + let private_key = match vault_client + .get_ref() + .fetch_ssh_key(&user.id, server_id) + .await + { + Ok(key) => key, + Err(e) => { + tracing::warn!("Failed to fetch SSH key from Vault during validation: {}", e); + let response = ValidateResponse { + valid: false, + server_id, + srv_ip: server.srv_ip.clone(), + message: "SSH key could not be retrieved from secure storage".to_string(), + ..Default::default() + }; + return Ok(JsonResponse::build() + .set_item(Some(response)) + .ok("Validation failed")); + } + }; + + // Get SSH connection parameters + let ssh_port = server.ssh_port.unwrap_or(22) as u16; + let ssh_user = server.ssh_user.clone().unwrap_or_else(|| "root".to_string()); + + // Perform SSH connection and system check + let check_result = ssh_client::check_server( + &srv_ip, + ssh_port, + &ssh_user, + &private_key, + Duration::from_secs(30), + ) + .await; + + // Build response from check result + let valid = check_result.connected && check_result.authenticated; + let message = if valid { + check_result.summary() + } else { + check_result.error.unwrap_or_else(|| "SSH validation failed".to_string()) + }; + + let response = ValidateResponse { + valid, + server_id, + srv_ip: Some(srv_ip), + message, + connected: check_result.connected, + authenticated: check_result.authenticated, + username: check_result.username, + disk_total_gb: check_result.disk_total_gb, + disk_available_gb: check_result.disk_available_gb, + disk_usage_percent: check_result.disk_usage_percent, + docker_installed: check_result.docker_installed, + docker_version: check_result.docker_version, + os_name: check_result.os_name, + os_version: check_result.os_version, + memory_total_mb: check_result.memory_total_mb, + memory_available_mb: check_result.memory_available_mb, + }; + + let ok_message = if valid { + "SSH connection validated successfully" + } else { + "SSH validation failed" + }; + + Ok(JsonResponse::build() + .set_item(Some(response)) + .ok(ok_message)) +} + /// Delete SSH key for a server (disconnect) /// DELETE /server/{id}/ssh-key #[tracing::instrument(name = "Delete SSH key for server.")]
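ssh_client::check_server's internals are not part of this diff; one plausible way the disk fields get filled is by parsing df output captured over SSH. A hypothetical sketch follows — the command flags, column layout, and the parse_df_gb helper are assumptions, not the crate's actual implementation:

// Hypothetical parser for `df -BG /` output, assuming the standard
// "Filesystem 1G-blocks Used Available Use% Mounted on" layout.
fn parse_df_gb(output: &str) -> Option<(f64, f64, f64)> {
    let line = output.lines().nth(1)?; // skip the header row
    let cols: Vec<&str> = line.split_whitespace().collect();
    if cols.len() < 5 {
        return None;
    }
    let total = cols[1].trim_end_matches('G').parse::<f64>().ok()?;
    let avail = cols[3].trim_end_matches('G').parse::<f64>().ok()?;
    let used_pct = cols[4].trim_end_matches('%').parse::<f64>().ok()?;
    Some((total, avail, used_pct))
}

fn main() {
    let sample = "Filesystem     1G-blocks  Used Available Use% Mounted on\n\
                  /dev/vda1            80G   52G       28G  65% /";
    assert_eq!(parse_df_gb(sample), Some((80.0, 28.0, 65.0)));
}

The same pattern would apply to free -m for the memory fields and to /etc/os-release for the OS name and version.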
diff --git a/src/routes/test/stack_view.rs index 74196a49..a8e3a50d 100644 --- a/src/routes/test/stack_view.rs +++ b/src/routes/test/stack_view.rs @@ -1,12 +1,14 @@ -use actix_web::{get, web, HttpResponse, Responder}; use crate::connectors::user_service::UserServiceClient; +use actix_web::{get, web, HttpResponse, Responder}; #[get("/stack_view")] -pub async fn test_stack_view(settings: web::Data) -> impl Responder { +pub async fn test_stack_view( + settings: web::Data, +) -> impl Responder { tracing::info!("Testing stack_view fetch from user service"); - + let client = UserServiceClient::new_public(&settings.user_service_url); - + match client.search_stack_view("", None).await { Ok(apps) => { tracing::info!("Successfully fetched {} applications", apps.len()); @@ -15,7 +17,7 @@ pub async fn test_stack_view(settings: web::Data "count": apps.len(), "message": format!("Successfully fetched {} applications from {}", apps.len(), settings.user_service_url) })) - }, + } Err(e) => { tracing::error!("Failed to fetch stack_view: {:?}", e); HttpResponse::InternalServerError().json(serde_json::json!({ diff --git a/src/services/config_renderer.rs index a5b38c8d..7da98cb7 100644 --- a/src/services/config_renderer.rs +++ b/src/services/config_renderer.rs @@ -13,7 +13,8 @@ use crate::models::{Project, ProjectApp}; use crate::services::vault_service::{AppConfig, VaultError, VaultService}; use anyhow::{Context, Result}; use serde::{Deserialize, Serialize}; -use serde_json::{json, Value}; +use serde_json::Value; +use serde_json::json; use std::collections::HashMap; use tera::{Context as TeraContext, Tera}; diff --git a/src/services/project_app_service.rs index e50e1f20..8ec8632c 100644 --- a/src/services/project_app_service.rs +++ b/src/services/project_app_service.rs @@ -4,6 +4,7 @@ //! syncs configuration changes to Vault for the Status Panel to consume. use crate::db; +use crate::forms::project::Payload; use crate::models::{Project, ProjectApp}; use crate::services::config_renderer::ConfigRenderer; use crate::services::vault_service::{VaultError, VaultService}; @@ -64,6 +65,22 @@ impl ProjectAppService { }) } + pub fn default_network_from_project(project: &Project) -> Option<String> { + Payload::try_from(project).ok().and_then(|payload| { + payload + .custom + .networks + .networks + .as_ref() + .and_then(|networks| { + networks + .iter() + .find(|network| network.name == "default_network") + .map(|network| network.name.clone()) + }) + }) + } + /// Create service without Vault sync (for testing or offline mode) pub fn new_without_sync(pool: Arc) -> std::result::Result { let config_renderer = ConfigRenderer::new() diff --git a/src/services/vault_service.rs index ead20671..d0183b60 100644 --- a/src/services/vault_service.rs +++ b/src/services/vault_service.rs @@ -6,7 +6,7 @@ //! //!
Vault Path Template: {prefix}/{deployment_hash}/apps/{app_name}/config -use anyhow::{Context, Result}; +use anyhow::Result; use reqwest::Client; use serde::{Deserialize, Serialize}; use std::collections::HashMap; diff --git a/src/startup.rs b/src/startup.rs index e6e7c3b6..3437a032 100644 --- a/src/startup.rs +++ b/src/startup.rs @@ -118,7 +118,7 @@ pub async fn run( .service( web::scope("/test") .service(routes::test::deploy::handler) - .service(routes::test::stack_view::test_stack_view) + .service(routes::test::stack_view::test_stack_view), ) .service( web::scope("/rating") @@ -195,6 +195,7 @@ pub async fn run( .service(crate::routes::marketplace::creator::create_handler) .service(crate::routes::marketplace::creator::update_handler) .service(crate::routes::marketplace::creator::submit_handler) + .service(crate::routes::marketplace::creator::resubmit_handler) .service(crate::routes::marketplace::creator::mine_handler), ) .service( @@ -223,8 +224,11 @@ pub async fn run( .service( crate::routes::marketplace::admin::list_submitted_handler, ) + .service(crate::routes::marketplace::admin::detail_handler) .service(crate::routes::marketplace::admin::approve_handler) - .service(crate::routes::marketplace::admin::reject_handler), + .service(crate::routes::marketplace::admin::reject_handler) + .service(crate::routes::marketplace::admin::unapprove_handler) + .service(crate::routes::marketplace::admin::security_scan_handler), ) .service( web::scope("/marketplace") @@ -250,6 +254,7 @@ pub async fn run( .service(crate::routes::server::ssh_key::generate_key) .service(crate::routes::server::ssh_key::upload_key) .service(crate::routes::server::ssh_key::get_public_key) + .service(crate::routes::server::ssh_key::validate_key) .service(crate::routes::server::ssh_key::delete_key), ) .service( diff --git a/tests/dockerhub.rs b/tests/dockerhub.rs index 7280a324..e2662227 100644 --- a/tests/dockerhub.rs +++ b/tests/dockerhub.rs @@ -1,7 +1,6 @@ // use std::fs; // use std::collections::HashMap; -use docker_compose_types::{ComposeVolume, SingleValue}; -use std::env; +use docker_compose_types::ComposeVolume; mod common; use stacker::forms::project::DockerImage; @@ -134,7 +133,6 @@ async fn test_docker_non_existent_repo_empty_namespace() { #[tokio::test] async fn test_docker_named_volume() { - let base_dir = env::var("DEFAULT_DEPLOY_DIR").unwrap_or_else(|_| "/home/trydirect".to_string()); let volume = Volume { host_path: Some("flask-data".to_owned()), container_path: Some("/var/www/flaskdata".to_owned()), @@ -144,15 +142,6 @@ async fn test_docker_named_volume() { println!("ComposeVolume: {:?}", cv); println!("{:?}", cv.driver_opts); assert_eq!(Some("flask-data".to_string()), cv.name); - assert_eq!( - &Some(SingleValue::String(format!( - "{}/flask-data", - base_dir.trim_end_matches('/') - ))), - cv.driver_opts.get("device").unwrap() - ); - assert_eq!( - &Some(SingleValue::String("none".to_string())), - cv.driver_opts.get("type").unwrap() - ); + assert!(cv.driver.is_none()); + assert!(cv.driver_opts.is_empty()); } diff --git a/tests/marketplace_integration.rs b/tests/marketplace_integration.rs index 5165715b..6830548b 100644 --- a/tests/marketplace_integration.rs +++ b/tests/marketplace_integration.rs @@ -30,6 +30,9 @@ async fn test_deployment_free_template_allowed() { long_description: None, category_code: Some("cms".to_string()), product_id: None, // No paid product + price: None, + billing_cycle: None, + currency: None, tags: serde_json::json!(["free"]), tech_stack: serde_json::json!([]), status: 
"approved".to_string(), @@ -66,6 +69,9 @@ async fn test_deployment_plan_requirement_validated() { long_description: None, category_code: Some("enterprise".to_string()), product_id: None, + price: None, + billing_cycle: None, + currency: None, tags: serde_json::json!(["professional"]), tech_stack: serde_json::json!([]), status: "approved".to_string(), @@ -106,6 +112,9 @@ async fn test_deployment_owned_paid_template_allowed() { long_description: None, category_code: Some("ai".to_string()), product_id: Some(100), // Has product (paid) + price: None, + billing_cycle: None, + currency: None, tags: serde_json::json!(["ai", "agents", "paid"]), tech_stack: serde_json::json!([]), status: "approved".to_string(), @@ -145,6 +154,13 @@ fn test_webhook_payload_for_template_approval() { vendor_name: Some("John Doe".to_string()), category: Some("AI Agents".to_string()), tags: Some(serde_json::json!(["ai", "agents", "marketplace"])), + long_description: None, + tech_stack: None, + creator_name: Some("John Doe".to_string()), + deploy_count: Some(10), + view_count: Some(100), + approved_at: Some("2026-02-11T00:00:00Z".to_string()), + required_plan_name: None, }; // Verify payload has all required fields for approval @@ -175,6 +191,13 @@ fn test_webhook_payload_for_template_update_price() { vendor_name: Some("John Doe".to_string()), category: Some("AI Agents".to_string()), tags: Some(serde_json::json!(["ai", "agents", "v2"])), + long_description: None, + tech_stack: None, + creator_name: Some("John Doe".to_string()), + deploy_count: None, + view_count: None, + approved_at: None, + required_plan_name: None, }; assert_eq!(payload.action, "template_updated"); @@ -200,6 +223,13 @@ fn test_webhook_payload_for_template_rejection() { vendor_name: None, category: None, tags: None, + long_description: None, + tech_stack: None, + creator_name: None, + deploy_count: None, + view_count: None, + approved_at: None, + required_plan_name: None, }; assert_eq!(payload.action, "template_rejected"); @@ -225,6 +255,9 @@ async fn test_deployment_validation_flow_with_connector() { long_description: None, category_code: Some("cms".to_string()), product_id: None, + price: None, + billing_cycle: None, + currency: None, tags: serde_json::json!([]), tech_stack: serde_json::json!([]), status: "approved".to_string(), @@ -253,6 +286,9 @@ async fn test_deployment_validation_flow_with_connector() { long_description: None, category_code: Some("enterprise".to_string()), product_id: None, + price: None, + billing_cycle: None, + currency: None, tags: serde_json::json!([]), tech_stack: serde_json::json!([]), status: "approved".to_string(), @@ -362,6 +398,9 @@ async fn test_multiple_deployments_mixed_templates() { long_description: None, category_code: Some("test".to_string()), product_id: None, + price: None, + billing_cycle: None, + currency: None, tags: serde_json::json!([]), tech_stack: serde_json::json!([]), status: "approved".to_string(), @@ -390,6 +429,9 @@ async fn test_multiple_deployments_mixed_templates() { long_description: None, category_code: Some("test".to_string()), product_id: None, + price: None, + billing_cycle: None, + currency: None, tags: serde_json::json!([]), tech_stack: serde_json::json!([]), status: "approved".to_string(), @@ -423,6 +465,9 @@ async fn test_multiple_deployments_mixed_templates() { long_description: None, category_code: Some("test".to_string()), product_id: Some(100), // Has product + price: None, + billing_cycle: None, + currency: None, tags: serde_json::json!([]), tech_stack: serde_json::json!([]), 
status: "approved".to_string(), @@ -473,6 +518,9 @@ fn test_template_status_values() { long_description: None, category_code: None, product_id: None, + price: None, + billing_cycle: None, + currency: None, tags: serde_json::json!([]), tech_stack: serde_json::json!([]), status: "approved".to_string(),
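The payload fixtures above pin down the field set the user-service webhook carries; for orientation, here is a sketch of the JSON an approval notification might serialize to. The wire shape is an assumption reconstructed from the test fields (serde_json only):

fn main() {
    // Hypothetical body for a "template_approved" webhook call; field
    // names mirror the payload fixtures in the tests above.
    let payload = serde_json::json!({
        "action": "template_approved",
        "vendor_name": "John Doe",
        "creator_name": "John Doe",
        "category": "AI Agents",
        "tags": ["ai", "agents", "marketplace"],
        "long_description": null,
        "tech_stack": null,
        "deploy_count": 10,
        "view_count": 100,
        "approved_at": "2026-02-11T00:00:00Z",
        "required_plan_name": null
    });
    println!("{}", serde_json::to_string_pretty(&payload).unwrap());
}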