From 5fc0a87dea71fd2783027574cd0e99c657d87f19 Mon Sep 17 00:00:00 2001
From: Maxwell Becker <49575486+mbecker20@users.noreply.github.com>
Date: Mon, 2 Sep 2024 01:38:40 +0300
Subject: [PATCH] 1.14 - Rename to Komodo - Docker Management (#56)

* setup network page
* add Network, Image, Container
* Docker ListItems and Inspects
* frontend build
* dev0
* network info working
* fix cargo lock
* dev1
* pages for the things
* implement Active in dashboard
* RunBuild update trigger list refresh
* rename deployment executions to StartDeployment etc
* add server level container control
* dev2
* add Config field to Image
* can get image labels from Config.Labels
* mount container page
* server show resource count
* add GetContainerLog api
* add _AllContainers api
* dev3
* move ResourceTarget to entities mod
* GetResourceMatchingContainer api
* connect container to resource
* dev4 add volume names to container list items
* ts types
* volume / image / network unused management
* add image history to image page
* fix PruneContainers incorrect Operation
* update cache for server after server actions
* dev5
* add singapore to Hetzner
* implement delete single network / image / volume api
* dev6
* include "in use" on Docker Lists
* add docker resource delete buttons
* is nice
* fix volume all in use
* remove google font dependency
* use host networking in test compose
* implement Secret Variables (hidden in logs)
* remove unneeded borrow
* interpolate variables / secrets into extra args / onclone / onpull / command etc
* validate empty strings before SelectItem
* rename everything to Komodo
* rename workspace to komodo
* rc1
---
 .vscode/tasks.json | 93 --
 Cargo.lock | 1259 +++++++------
 Cargo.toml | 31 +-
 bin/alerter/Cargo.toml | 2 +-
 bin/alerter/Dockerfile | 4 +-
 bin/alerter/README.md | 2 +-
 bin/alerter/src/main.rs | 4 +-
 bin/cli/Cargo.toml | 8 +-
 bin/cli/README.md | 16 +-
 bin/cli/src/args.rs | 4 +-
 bin/cli/src/exec.rs | 190 ++-
 bin/cli/src/main.rs | 11 +-
 bin/cli/src/state.rs | 14 +-
 bin/core/Cargo.toml | 4 +-
 bin/core/Dockerfile | 8 +-
 bin/core/src/api/auth.rs | 2 +-
 bin/core/src/api/execute/build.rs | 161 +-
 bin/core/src/api/execute/deployment.rs | 120 +-
 bin/core/src/api/execute/mod.rs | 38 +-
 bin/core/src/api/execute/procedure.rs | 2 +-
 bin/core/src/api/execute/repo.rs | 109 +-
 bin/core/src/api/execute/server.rs | 813 ++++++++-
 bin/core/src/api/execute/server_template.rs | 2 +-
 bin/core/src/api/execute/stack.rs | 46 +-
 bin/core/src/api/execute/sync.rs | 6 +-
 bin/core/src/api/read/alert.rs | 2 +-
 bin/core/src/api/read/alerter.rs | 2 +-
 bin/core/src/api/read/build.rs | 2 +-
 bin/core/src/api/read/builder.rs | 2 +-
 bin/core/src/api/read/deployment.rs | 17 +-
 bin/core/src/api/read/mod.rs | 18 +-
 bin/core/src/api/read/permission.rs | 2 +-
 bin/core/src/api/read/procedure.rs | 2 +-
 bin/core/src/api/read/provider.rs | 2 +-
 bin/core/src/api/read/repo.rs | 2 +-
 bin/core/src/api/read/search.rs | 5 +-
 bin/core/src/api/read/server.rs | 317 +++-
 bin/core/src/api/read/server_template.rs | 2 +-
 bin/core/src/api/read/stack.rs | 2 +-
 bin/core/src/api/read/sync.rs | 2 +-
 bin/core/src/api/read/tag.rs | 2 +-
 bin/core/src/api/read/toml.rs | 97 +-
 bin/core/src/api/read/update.rs | 5 +-
 bin/core/src/api/read/user.rs | 2 +-
 bin/core/src/api/read/user_group.rs | 2 +-
 bin/core/src/api/read/variable.rs | 30 +-
 bin/core/src/api/user.rs | 10 +-
 bin/core/src/api/write/alerter.rs | 2 +-
 bin/core/src/api/write/build.rs | 3 +-
 bin/core/src/api/write/builder.rs | 2 +-
 bin/core/src/api/write/deployment.rs | 10 +-
 bin/core/src/api/write/description.rs | 4 +-
 bin/core/src/api/write/mod.rs | 4 +-
 bin/core/src/api/write/permissions.rs | 4 +-
 bin/core/src/api/write/procedure.rs | 2 +-
 bin/core/src/api/write/provider.rs | 5 +-
 bin/core/src/api/write/repo.rs | 3 +-
 bin/core/src/api/write/server.rs | 45 +-
 bin/core/src/api/write/server_template.rs | 2 +-
 bin/core/src/api/write/service_user.rs | 6 +-
 bin/core/src/api/write/stack.rs | 10 +-
 bin/core/src/api/write/sync.rs | 14 +-
 bin/core/src/api/write/tag.rs | 5 +-
 bin/core/src/api/write/user_group.rs | 6 +-
 bin/core/src/api/write/variable.rs | 49 +-
 bin/core/src/auth/github/client.rs | 2 +-
 bin/core/src/auth/github/mod.rs | 6 +-
 bin/core/src/auth/google/client.rs | 4 +-
 bin/core/src/auth/google/mod.rs | 2 +-
 bin/core/src/auth/jwt.rs | 2 +-
 bin/core/src/auth/local.rs | 2 +-
 bin/core/src/auth/mod.rs | 4 +-
 bin/core/src/cloud/aws/ec2.rs | 11 +-
 bin/core/src/cloud/hetzner/common.rs | 5 +
 bin/core/src/cloud/hetzner/mod.rs | 5 +-
 bin/core/src/config.rs | 102 +-
 bin/core/src/db.rs | 5 +-
 bin/core/src/helpers/action_state.rs | 2 +-
 bin/core/src/helpers/alert.rs | 7 +-
 bin/core/src/helpers/build.rs | 2 +-
 bin/core/src/helpers/builder.rs | 12 +-
 bin/core/src/helpers/cache.rs | 2 +-
 bin/core/src/helpers/channel.rs | 2 +-
 bin/core/src/helpers/interpolate.rs | 222 +++
 bin/core/src/helpers/mod.rs | 88 +-
 bin/core/src/helpers/procedure.rs | 362 +++-
 bin/core/src/helpers/prune.rs | 2 +-
 bin/core/src/helpers/query.rs | 164 +-
 bin/core/src/helpers/repo.rs | 2 +-
 bin/core/src/helpers/stack/execute.rs | 2 +-
 bin/core/src/helpers/stack/mod.rs | 6 +-
 bin/core/src/helpers/stack/remote.rs | 3 +-
 bin/core/src/helpers/stack/services.rs | 2 +-
 bin/core/src/helpers/sync/deploy.rs | 10 +-
 bin/core/src/helpers/sync/file.rs | 2 +-
 bin/core/src/helpers/sync/mod.rs | 2 +-
 bin/core/src/helpers/sync/remote.rs | 3 +-
 bin/core/src/helpers/sync/resource.rs | 26 +-
 bin/core/src/helpers/sync/resources.rs | 126 +-
 bin/core/src/helpers/sync/user_groups.rs | 5 +-
 bin/core/src/helpers/sync/variables.rs | 115 +-
 bin/core/src/helpers/update.rs | 130 +-
 bin/core/src/listener/github/build.rs | 2 +-
 bin/core/src/listener/github/mod.rs | 4 +-
 bin/core/src/listener/github/procedure.rs | 2 +-
 bin/core/src/listener/github/repo.rs | 2 +-
 bin/core/src/listener/github/stack.rs | 2 +-
 bin/core/src/listener/github/sync.rs | 2 +-
 bin/core/src/main.rs | 4 +-
 bin/core/src/monitor/alert/deployment.rs | 7 +-
 bin/core/src/monitor/alert/mod.rs | 2 +-
 bin/core/src/monitor/alert/server.rs | 14 +-
 bin/core/src/monitor/alert/stack.rs | 7 +-
 bin/core/src/monitor/helpers.rs | 27 +-
 bin/core/src/monitor/lists.rs | 67 +-
 bin/core/src/monitor/mod.rs | 38 +-
 bin/core/src/monitor/record.rs | 2 +-
 bin/core/src/monitor/resources.rs | 11 +-
 bin/core/src/resource/alerter.rs | 8 +-
 bin/core/src/resource/build.rs | 8 +-
 bin/core/src/resource/builder.rs | 8 +-
 bin/core/src/resource/deployment.rs | 16 +-
 bin/core/src/resource/mod.rs | 66 +-
 bin/core/src/resource/procedure.rs | 175 +-
 bin/core/src/resource/repo.rs | 8 +-
 bin/core/src/resource/server.rs | 12 +-
 bin/core/src/resource/server_template.rs | 8 +-
 bin/core/src/resource/stack.rs | 8 +-
 bin/core/src/resource/sync.rs | 14 +-
 bin/core/src/state.rs | 2 +-
 bin/core/src/ws.rs | 4 +-
 bin/migrator/Cargo.toml | 5 +-
 bin/migrator/Dockerfile | 4 +-
 bin/migrator/README.md | 27 +-
 bin/migrator/src/legacy/mod.rs | 1 -
 bin/migrator/src/legacy/v0/build.rs | 253 ---
 bin/migrator/src/legacy/v0/config.rs | 201 ---
 bin/migrator/src/legacy/v0/deployment.rs | 386 -----
 bin/migrator/src/legacy/v0/mod.rs | 342 ----
 bin/migrator/src/legacy/v0/server.rs | 321 ----
 bin/migrator/src/legacy/v0/update.rs | 144 --
 bin/migrator/src/legacy/v0/user.rs | 115 --
 bin/migrator/src/legacy/v1_11/build.rs | 34 +-
 bin/migrator/src/legacy/v1_11/deployment.rs | 17 +-
 bin/migrator/src/legacy/v1_6/build.rs | 16 +-
 bin/migrator/src/legacy/v1_6/deployment.rs | 14 +-
 bin/migrator/src/legacy/v1_6/mod.rs | 4 +-
 bin/migrator/src/main.rs | 53 -
 bin/migrator/src/migrate/mod.rs | 1 -
 bin/migrator/src/migrate/v0.rs | 309 ----
 bin/migrator/src/migrate/v1_11.rs | 2 +-
 bin/migrator/src/migrate/v1_6.rs | 2 +-
 bin/periphery/Cargo.toml | 4 +-
 bin/periphery/Dockerfile | 6 +-
 bin/periphery/src/api/build.rs | 46 +-
 bin/periphery/src/api/compose.rs | 29 +-
 bin/periphery/src/api/container.rs | 209 ++-
 bin/periphery/src/api/deploy.rs | 15 +-
 bin/periphery/src/api/git.rs | 36 +-
 bin/periphery/src/api/image.rs | 63 +
 bin/periphery/src/api/mod.rs | 111 +-
 bin/periphery/src/api/network.rs | 26 +-
 bin/periphery/src/api/volume.rs | 47 +
 bin/periphery/src/compose.rs | 55 +-
 bin/periphery/src/config.rs | 4 +-
 bin/periphery/src/docker.rs | 875 +++++++++-
 bin/periphery/src/helpers.rs | 2 +-
 bin/periphery/src/main.rs | 2 +-
 bin/periphery/src/stats.rs | 4 +-
 bin/update_logger/Cargo.toml | 2 +-
 bin/update_logger/Dockerfile | 4 +-
 bin/update_logger/src/main.rs | 6 +-
 changelog.md | 15 +
 client/core/rs/Cargo.toml | 6 +-
 client/core/rs/README.md | 4 +-
 client/core/rs/src/api/auth.rs | 14 +-
 client/core/rs/src/api/execute/build.rs | 6 +-
 client/core/rs/src/api/execute/deployment.rs | 34 +-
 client/core/rs/src/api/execute/mod.rs | 39 +-
 client/core/rs/src/api/execute/procedure.rs | 4 +-
 client/core/rs/src/api/execute/repo.rs | 10 +-
 client/core/rs/src/api/execute/server.rs | 463 +++++-
 .../rs/src/api/execute/server_template.rs | 4 +-
 client/core/rs/src/api/execute/stack.rs | 16 +-
 client/core/rs/src/api/execute/sync.rs | 4 +-
 client/core/rs/src/api/mod.rs | 8 +-
 client/core/rs/src/api/read/alert.rs | 6 +-
 client/core/rs/src/api/read/alerter.rs | 10 +-
 client/core/rs/src/api/read/build.rs | 22 +-
 client/core/rs/src/api/read/builder.rs | 10 +-
 client/core/rs/src/api/read/deployment.rs | 44 +-
 client/core/rs/src/api/read/mod.rs | 19 +-
 client/core/rs/src/api/read/permission.rs | 10 +-
 client/core/rs/src/api/read/procedure.rs | 12 +-
 client/core/rs/src/api/read/provider.rs | 10 +-
 client/core/rs/src/api/read/repo.rs | 14 +-
 client/core/rs/src/api/read/search.rs | 7 +-
 client/core/rs/src/api/read/server.rs | 270 ++-
 .../core/rs/src/api/read/server_template.rs | 10 +-
 client/core/rs/src/api/read/stack.rs | 22 +-
 client/core/rs/src/api/read/sync.rs | 14 +-
 client/core/rs/src/api/read/tag.rs | 6 +-
 client/core/rs/src/api/read/toml.rs | 8 +-
 client/core/rs/src/api/read/update.rs | 6 +-
 client/core/rs/src/api/read/user.rs | 14 +-
 client/core/rs/src/api/read/user_group.rs | 6 +-
 client/core/rs/src/api/read/variable.rs | 12 +-
 client/core/rs/src/api/user.rs | 12 +-
 client/core/rs/src/api/write/alerter.rs | 10 +-
 client/core/rs/src/api/write/api_key.rs | 6 +-
 client/core/rs/src/api/write/build.rs | 16 +-
 client/core/rs/src/api/write/builder.rs | 10 +-
 client/core/rs/src/api/write/deployment.rs | 12 +-
 client/core/rs/src/api/write/description.rs | 6 +-
 client/core/rs/src/api/write/mod.rs | 2 +-
 client/core/rs/src/api/write/permissions.rs | 11 +-
 client/core/rs/src/api/write/procedure.rs | 10 +-
 client/core/rs/src/api/write/provider.rs | 14 +-
 client/core/rs/src/api/write/repo.rs | 20 +-
 client/core/rs/src/api/write/server.rs | 29 +-
 .../core/rs/src/api/write/server_template.rs | 10 +-
 client/core/rs/src/api/write/stack.rs | 18 +-
 client/core/rs/src/api/write/sync.rs | 16 +-
 client/core/rs/src/api/write/tags.rs | 12 +-
 client/core/rs/src/api/write/user.rs | 6 +-
 client/core/rs/src/api/write/user_group.rs | 14 +-
 client/core/rs/src/api/write/variable.rs | 36 +-
 client/core/rs/src/busy.rs | 10 +-
 client/core/rs/src/entities/alert.rs | 32 +-
 client/core/rs/src/entities/alerter.rs | 2 +-
 client/core/rs/src/entities/build.rs | 4 +-
 client/core/rs/src/entities/builder.rs | 2 +-
 client/core/rs/src/entities/config/core.rs | 152 +-
 .../core/rs/src/entities/config/periphery.rs | 14 +-
 client/core/rs/src/entities/deployment.rs | 101 +-
 .../core/rs/src/entities/docker/container.rs | 1161 +++++++++++++
 client/core/rs/src/entities/docker/image.rs | 147 ++
 client/core/rs/src/entities/docker/mod.rs | 177 ++
 client/core/rs/src/entities/docker/network.rs | 114 ++
 client/core/rs/src/entities/docker/volume.rs | 383 +++++
 client/core/rs/src/entities/logger.rs | 2 +-
 client/core/rs/src/entities/mod.rs | 208 ++-
 client/core/rs/src/entities/permission.rs | 2 +-
 client/core/rs/src/entities/repo.rs | 2 +-
 client/core/rs/src/entities/resource.rs | 4 +-
 .../src/entities/{server/mod.rs => server.rs} | 34 +-
 .../rs/src/entities/server/docker_image.rs | 98 --
 .../rs/src/entities/server/docker_network.rs | 220 ---
 .../src/entities/server_template/hetzner.rs | 1 +
 client/core/rs/src/entities/stack.rs | 22 +-
 .../rs/src/entities/{server => }/stats.rs | 39 +-
 client/core/rs/src/entities/sync.rs | 4 +-
 client/core/rs/src/entities/toml.rs | 20 +-
 client/core/rs/src/entities/update.rs | 151 +-
 client/core/rs/src/entities/user.rs | 4 +-
 client/core/rs/src/entities/user_group.rs | 3 +-
 client/core/rs/src/entities/variable.rs | 8 +
 client/core/rs/src/lib.rs | 56 +-
 client/core/rs/src/request.rs | 22 +-
 client/core/rs/src/ws.rs | 6 +-
 client/core/ts/package.json | 2 +-
 client/core/ts/src/lib.ts | 2 +-
 client/core/ts/src/responses.ts | 48 +-
 client/core/ts/src/types.ts | 1469 +++++++++++++++--
 client/periphery/rs/Cargo.toml | 2 +-
 client/periphery/rs/src/api/build.rs | 25 +-
 client/periphery/rs/src/api/compose.rs | 5 +-
 client/periphery/rs/src/api/container.rs | 60 +-
 client/periphery/rs/src/api/git.rs | 40 +-
 client/periphery/rs/src/api/image.rs | 37 +
 client/periphery/rs/src/api/mod.rs | 17 +-
 client/periphery/rs/src/api/network.rs | 13 +-
 client/periphery/rs/src/api/stats.rs | 4 +-
 client/periphery/rs/src/api/volume.rs | 26 +
 config_example/aio.compose.yaml | 64 +-
 config_example/core.compose.yaml | 48 +-
 config_example/core.config.example.toml | 148 +-
 config_example/periphery.config.example.toml | 22 +-
 docsite/docs/api.md | 6 +-
 docsite/docs/build-images/builders.md | 12 +-
 docsite/docs/build-images/configuration.md | 10 +-
 docsite/docs/build-images/index.mdx | 2 +-
 docsite/docs/build-images/versioning.md | 6 +-
 docsite/docs/connecting-servers.md | 37 +-
 docsite/docs/core-setup.md | 40 +-
 .../docs/deploy-containers/configuration.md | 18 +-
 docsite/docs/deploy-containers/index.mdx | 2 +-
 .../deploy-containers/lifetime-management.md | 2 +-
 docsite/docs/docker-compose.md | 20 +-
 docsite/docs/file-paths.md | 2 +-
 docsite/docs/intro.md | 20 +-
 docsite/docs/permissioning.md | 8 +-
 docsite/docs/resources.md | 4 +-
 docsite/docs/sync-resources.md | 40 +-
 docsite/docs/version-upgrades.md | 4 +-
 docsite/docs/webhooks.md | 21 +-
 docsite/docusaurus.config.ts | 18 +-
 docsite/package-lock.json | 968 ++++++-----
 docsite/package.json | 10 +-
 .../src/components/HomepageFeatures/index.tsx | 6 +-
 .../{MonitorLogo.tsx => KomodoLogo.tsx} | 2 +-
 docsite/src/pages/index.tsx | 14 +-
 frontend/Dockerfile | 6 +-
 frontend/index.html | 2 +-
 frontend/package.json | 2 +-
 frontend/public/manifest.json | 4 +-
 frontend/src/components/alert/index.tsx | 2 +-
 frontend/src/components/alert/table.tsx | 2 +-
 frontend/src/components/alert/topbar.tsx | 2 +-
 frontend/src/components/config/index.tsx | 2 +-
 frontend/src/components/config/util.tsx | 50 +-
 frontend/src/components/export.tsx | 2 +-
 frontend/src/components/keys/table.tsx | 2 +-
 frontend/src/components/layouts.tsx | 2 +-
 frontend/src/components/log.tsx | 2 +-
 .../resources/alerter/config/alert_types.tsx | 2 +-
 .../resources/alerter/config/endpoint.tsx | 2 +-
 .../resources/alerter/config/index.tsx | 2 +-
 .../resources/alerter/config/resources.tsx | 2 +-
 .../components/resources/alerter/index.tsx | 2 +-
 .../components/resources/alerter/table.tsx | 2 +-
 .../components/resources/build/actions.tsx | 2 +-
 .../src/components/resources/build/config.tsx | 4 +-
 .../src/components/resources/build/index.tsx | 2 +-
 .../src/components/resources/build/table.tsx | 2 +-
 .../components/resources/builder/config.tsx | 2 +-
 .../components/resources/builder/index.tsx | 2 +-
 .../components/resources/builder/table.tsx | 2 +-
 frontend/src/components/resources/common.tsx | 8 +-
 .../resources/deployment/actions.tsx | 40 +-
 .../config/components/environment.tsx | 2 +-
 .../deployment/config/components/image.tsx | 2 +-
 .../deployment/config/components/network.tsx | 24 +-
 .../deployment/config/components/ports.tsx | 2 +-
 .../deployment/config/components/restart.tsx | 2 +-
 .../config/components/term-signal.tsx | 2 +-
 .../deployment/config/components/volumes.tsx | 2 +-
 .../resources/deployment/config/index.tsx | 2 +-
 .../components/resources/deployment/index.tsx | 24 +-
 .../components/resources/deployment/log.tsx | 6 +-
 .../components/resources/deployment/table.tsx | 2 +-
 .../components/resources/procedure/config.tsx | 428 +++--
 .../components/resources/procedure/index.tsx | 4 +-
 .../components/resources/procedure/table.tsx | 2 +-
 .../src/components/resources/repo/actions.tsx | 2 +-
 .../src/components/resources/repo/config.tsx | 4 +-
 .../src/components/resources/repo/index.tsx | 2 +-
 .../src/components/resources/repo/table.tsx | 2 +-
 .../resources/resource-sync/config.tsx | 4 +-
 .../resources/resource-sync/index.tsx | 2 +-
 .../resource-sync/pending-or-config.tsx | 2 +-
 .../resources/resource-sync/table.tsx | 2 +-
 .../resources/server-template/actions.tsx | 2 +-
 .../resources/server-template/config/aws.tsx | 2 +-
 .../server-template/config/hetzner.tsx | 5 +-
 .../server-template/config/index.tsx | 2 +-
 .../resources/server-template/index.tsx | 2 +-
 .../resources/server-template/table.tsx | 2 +-
 .../components/resources/server/actions.tsx | 73 +-
 .../components/resources/server/config.tsx | 2 +-
 .../src/components/resources/server/hooks.ts | 2 +-
 .../src/components/resources/server/index.tsx | 242 ++-
 .../resources/server/info/containers.tsx | 25 +
 .../resources/server/info/images.tsx | 84 +
 .../resources/server/info/index.tsx | 88 +
 .../resources/server/info/networks.tsx | 105 ++
 .../resources/server/info/volumes.tsx | 78 +
 .../resources/server/stat-chart.tsx | 2 +-
 .../src/components/resources/server/stats.tsx | 2 +-
 .../src/components/resources/server/table.tsx | 27 +-
 .../components/resources/stack/actions.tsx | 18 +-
 .../src/components/resources/stack/config.tsx | 4 +-
 .../src/components/resources/stack/index.tsx | 12 +-
 .../src/components/resources/stack/info.tsx | 2 +-
 .../components/resources/stack/services.tsx | 12 +-
 .../src/components/resources/stack/table.tsx | 2 +-
 frontend/src/components/tags/index.tsx | 2 +-
 frontend/src/components/topbar.tsx | 4 +-
 frontend/src/components/updates/resource.tsx | 4 +-
 frontend/src/components/updates/table.tsx | 2 +-
 frontend/src/components/updates/topbar.tsx | 2 +-
 frontend/src/components/users/hooks.ts | 2 +-
 .../components/users/permissions-table.tsx | 2 +-
 .../users/resource-type-permissions.tsx | 2 +-
 frontend/src/components/users/table.tsx | 2 +-
 frontend/src/components/util.tsx | 323 +++-
 frontend/src/globals.css | 11 +-
 frontend/src/lib/color.ts | 19 +-
 frontend/src/lib/formatting.ts | 13 +-
 frontend/src/lib/hooks.ts | 10 +-
 frontend/src/lib/socket.tsx | 46 +-
 frontend/src/lib/utils.ts | 2 +-
 frontend/src/main.tsx | 8 +-
 frontend/src/pages/alerts.tsx | 2 +-
 frontend/src/pages/home/all_resources.tsx | 24 +-
 frontend/src/pages/home/dashboard.tsx | 100 +-
 frontend/src/pages/login.tsx | 15 +-
 frontend/src/pages/resource.tsx | 2 +-
 frontend/src/pages/resources.tsx | 2 +-
 .../pages/server-info/container/actions.tsx | 182 ++
 .../src/pages/server-info/container/index.tsx | 234 +++
 .../src/pages/server-info/container/log.tsx | 162 ++
 frontend/src/pages/server-info/image.tsx | 217 +++
 frontend/src/pages/server-info/network.tsx | 282 ++++
 frontend/src/pages/server-info/volume.tsx | 180 ++
 frontend/src/pages/settings/providers.tsx | 4 +-
 frontend/src/pages/settings/tags.tsx | 17 -
 frontend/src/pages/settings/variables.tsx | 162 +-
 frontend/src/pages/stack-service/index.tsx | 24 +-
 frontend/src/pages/stack-service/log.tsx | 12 +-
 frontend/src/pages/updates.tsx | 24 +-
 frontend/src/pages/user-group.tsx | 2 +-
 frontend/src/router.tsx | 20 +
 frontend/src/types.d.ts | 2 +-
 frontend/yarn.lock | 8 +-
 ...or.code-workspace => komodo.code-workspace | 0
 lib/command/Cargo.toml | 2 +-
 lib/command/src/lib.rs | 11 +-
 lib/git/Cargo.toml | 2 +-
 lib/git/src/lib.rs | 241 ++-
 lib/logger/Cargo.toml | 2 +-
 lib/logger/src/lib.rs | 2 +-
 readme.md | 58 +-
 roadmap.md | 4 +-
 runfile.toml | 21 +-
 scripts/readme.md | 14 +-
 scripts/setup-periphery.py | 33 +-
 test.compose.yaml | 61 +-
 test.core.config.toml | 5 +-
 429 files changed, 14351 insertions(+), 7077 deletions(-)
 delete mode 100644 .vscode/tasks.json
 create mode 100644 bin/core/src/helpers/interpolate.rs
 delete mode 100644 bin/migrator/src/legacy/v0/build.rs
 delete mode 100644 bin/migrator/src/legacy/v0/config.rs
 delete mode 100644 bin/migrator/src/legacy/v0/deployment.rs
 delete mode 100644 bin/migrator/src/legacy/v0/mod.rs
 delete mode 100644 bin/migrator/src/legacy/v0/server.rs
 delete mode 100644 bin/migrator/src/legacy/v0/update.rs
 delete mode 100644 bin/migrator/src/legacy/v0/user.rs
 delete mode 100644 bin/migrator/src/migrate/v0.rs
 create mode 100644 bin/periphery/src/api/image.rs
 create mode 100644 bin/periphery/src/api/volume.rs
 create mode 100644 client/core/rs/src/entities/docker/container.rs
 create mode 100644 client/core/rs/src/entities/docker/image.rs
 create mode 100644 client/core/rs/src/entities/docker/mod.rs
 create mode 100644 client/core/rs/src/entities/docker/network.rs
 create mode 100644 client/core/rs/src/entities/docker/volume.rs
 rename client/core/rs/src/entities/{server/mod.rs => server.rs} (89%)
 delete mode 100644 client/core/rs/src/entities/server/docker_image.rs
 delete mode 100644 client/core/rs/src/entities/server/docker_network.rs
 rename client/core/rs/src/entities/{server => }/stats.rs (83%)
 create mode 100644
client/periphery/rs/src/api/image.rs create mode 100644 client/periphery/rs/src/api/volume.rs rename docsite/src/components/{MonitorLogo.tsx => KomodoLogo.tsx} (75%) create mode 100644 frontend/src/components/resources/server/info/containers.tsx create mode 100644 frontend/src/components/resources/server/info/images.tsx create mode 100644 frontend/src/components/resources/server/info/index.tsx create mode 100644 frontend/src/components/resources/server/info/networks.tsx create mode 100644 frontend/src/components/resources/server/info/volumes.tsx create mode 100644 frontend/src/pages/server-info/container/actions.tsx create mode 100644 frontend/src/pages/server-info/container/index.tsx create mode 100644 frontend/src/pages/server-info/container/log.tsx create mode 100644 frontend/src/pages/server-info/image.tsx create mode 100644 frontend/src/pages/server-info/network.tsx create mode 100644 frontend/src/pages/server-info/volume.tsx rename monitor.code-workspace => komodo.code-workspace (100%) diff --git a/.vscode/tasks.json b/.vscode/tasks.json deleted file mode 100644 index 758c060b3..000000000 --- a/.vscode/tasks.json +++ /dev/null @@ -1,93 +0,0 @@ -{ - "version": "2.0.0", - "tasks": [ - { - "type": "cargo", - "command": "build", - "group": { - "kind": "build", - "isDefault": true - }, - "label": "rust: cargo build" - }, - { - "type": "cargo", - "command": "fmt", - "label": "rust: cargo fmt" - }, - { - "type": "cargo", - "command": "check", - "label": "rust: cargo check" - }, - { - "label": "start dev", - "dependsOn": [ - "run core", - "start frontend" - ], - "problemMatcher": [] - }, - { - "type": "shell", - "command": "yarn start", - "label": "start frontend", - "options": { - "cwd": "${workspaceFolder}/frontend" - }, - "presentation": { - "group": "start" - } - }, - { - "type": "cargo", - "command": "run", - "label": "run core", - "options": { - "cwd": "${workspaceFolder}/bin/core" - }, - "presentation": { - "group": "start" - } - }, - { - "type": "cargo", - "command": "run", - "label": "run periphery", - "options": { - "cwd": "${workspaceFolder}/bin/periphery" - } - }, - { - "type": "cargo", - "command": "run", - "label": "run tests", - "options": { - "cwd": "${workspaceFolder}/bin/tests" - } - }, - { - "type": "cargo", - "command": "publish", - "args": ["--allow-dirty"], - "label": "publish types", - "options": { - "cwd": "${workspaceFolder}/lib/types" - } - }, - { - "type": "cargo", - "command": "publish", - "label": "publish rs client", - "options": { - "cwd": "${workspaceFolder}/lib/rs_client" - } - }, - { - "type": "shell", - "command": "node ./client/ts/generate_types.mjs", - "label": "generate typescript types", - "problemMatcher": [] - } - ] -} \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index aec0e71b6..97020eced 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" dependencies = [ "gimli", ] @@ -26,7 +26,7 @@ dependencies = [ "cfg-if", "getrandom", "once_cell", - "version_check 0.9.4", + "version_check 0.9.5", "zerocopy", ] @@ -41,14 +41,14 @@ dependencies = [ [[package]] name = "alerter" -version = "1.13.4" +version = "1.14.0-rc1" dependencies = [ "anyhow", "axum", "dotenvy", "envy", + "komodo_client", "logger", - "monitor_client", "serde", "tokio", 
"tracing", @@ -71,9 +71,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.14" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", @@ -92,27 +92,27 @@ checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anstyle-parse" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.3" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.3" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -132,7 +132,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -154,18 +154,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] name = "async-trait" -version = "0.1.80" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -225,9 +225,9 @@ dependencies = [ [[package]] name = "aws-credential-types" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16838e6c9e12125face1c1eff1343c75e3ff540de98ff7ebd61874a89bcfeb9" +checksum = "60e8f6b615cb5fc60a98132268508ad104310f0cfb25a1c22eee76efdf9154da" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -237,14 +237,15 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f42c2d4218de4dcd890a109461e2f799a1a2ba3bcd2cde9af88360f5df9266c6" +checksum = "2424565416eef55906f9f8cece2072b6b6a76075e3ff81483ebe938a89a4c05f" dependencies = [ "aws-credential-types", "aws-sigv4", "aws-smithy-async", "aws-smithy-http", + "aws-smithy-runtime", "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", @@ -261,9 +262,9 @@ dependencies = [ [[package]] name = "aws-sdk-ec2" -version = "1.66.0" +version = "1.70.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf8f784ab315034c3abbd4bd4aeb23c43093d5beb534f34fbb54df9064e4e96" +checksum = "75b504f61ac8793bb504d9aff11743caf35cfe30515ee5a92415a250539cf6f7" dependencies = [ "aws-credential-types", "aws-runtime", @@ -285,9 +286,9 
@@ dependencies = [ [[package]] name = "aws-sdk-ecr" -version = "1.40.0" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6c64dafa2d1f523b2f24ab410d1c89f41d912c901e7599f9350ea1188fd42c" +checksum = "1e4c9459176ab063f753eb710186f5ea7d3db6e3bad3dadb9b3a9fe8d107651d" dependencies = [ "aws-credential-types", "aws-runtime", @@ -307,9 +308,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.38.0" +version = "1.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fca5e0b9fb285638f1007e9d961d963b9e504ab968fe5a3807cce94070bd0ce3" +checksum = "11822090cf501c316c6f75711d77b96fba30658e3867a7762e5e2f5d32d31e81" dependencies = [ "aws-credential-types", "aws-runtime", @@ -329,9 +330,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.39.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc3e48ec239bb734db029ceef83599f4c9b3ce5d25c961b5bcd3f031c15bed54" +checksum = "78a2a06ff89176123945d1bbe865603c4d7101bea216a550bb4d2e4e9ba74d74" dependencies = [ "aws-credential-types", "aws-runtime", @@ -351,9 +352,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.38.0" +version = "1.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede095dfcc5c92b224813c24a82b65005a475c98d737e2726a898cf583e2e8bd" +checksum = "a20a91795850826a6f456f4a48eff1dfa59a0e69bdbf5b8c50518fd372106574" dependencies = [ "aws-credential-types", "aws-runtime", @@ -408,9 +409,9 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.60.9" +version = "0.60.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9cd0ae3d97daa0a2bf377a4d8e8e1362cae590c4a1aad0d40058ebca18eb91e" +checksum = "01dbcb6e2588fd64cfb6d7529661b06466419e4c54ed1c62d6510d2d0350a728" dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", @@ -447,9 +448,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.6.3" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0abbf454960d0db2ad12684a1640120e7557294b0ff8e2f11236290a1b293225" +checksum = "d1ce695746394772e7000b39fe073095db6d45a862d0767dd5ad0ac0d7f8eb87" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -460,9 +461,9 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "http-body 1.0.0", + "http-body 1.0.1", "httparse", - "hyper 0.14.28", + "hyper 0.14.30", "hyper-rustls 0.24.2", "once_cell", "pin-project-lite", @@ -491,9 +492,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.2.2" +version = "1.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cee7cadb433c781d3299b916fbf620fea813bf38f49db282fb6858141a05cc8" +checksum = "273dcdfd762fae3e1650b8024624e7cd50e484e37abdab73a7a706188ad34543" dependencies = [ "base64-simd", "bytes", @@ -502,7 +503,7 @@ dependencies = [ "http 0.2.12", "http 1.1.0", "http-body 0.4.6", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "itoa", "num-integer", @@ -534,7 +535,7 @@ dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", "aws-smithy-types", - "rustc_version 0.4.0", + "rustc_version", "tracing", ] @@ -551,7 +552,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "hyper 1.4.1", "hyper-util", @@ -586,7 +587,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 
1.0.1", "http-body-util", "mime", "pin-project-lite", @@ -609,7 +610,7 @@ dependencies = [ "futures-util", "headers", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", @@ -629,14 +630,14 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] name = "backtrace" -version = "0.3.71" +version = "0.3.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" +checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" dependencies = [ "addr2line", "cc", @@ -696,9 +697,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "bitvec" @@ -733,9 +734,9 @@ dependencies = [ [[package]] name = "bollard" -version = "0.17.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a063d51a634c7137ecd9f6390ec78e1c512e84c9ded80198ec7df3339a16a33" +checksum = "d41711ad46fda47cd701f6908e59d1bd6b9a2b7464c0d0aeab95c6d37096ff8a" dependencies = [ "base64 0.22.1", "bollard-stubs", @@ -772,7 +773,7 @@ checksum = "6d7c5415e3a6bc6d3e99eff6268e488fd4ee25e7b28c10f08fa6760bd9de16e4" dependencies = [ "serde", "serde_repr", - "serde_with 3.8.1", + "serde_with", ] [[package]] @@ -785,7 +786,7 @@ dependencies = [ "base64 0.13.1", "bitvec", "hex", - "indexmap 2.2.6", + "indexmap 2.4.0", "js-sys", "once_cell", "rand", @@ -810,9 +811,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" dependencies = [ "serde", ] @@ -829,9 +830,12 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.97" +version = "1.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" +checksum = "72db2f7947ecee9b03b510377e8bb9077afa27176fdbff55c51027e976fdcc48" +dependencies = [ + "shlex", +] [[package]] name = "cfg-if" @@ -851,7 +855,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -883,7 +887,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.1", + "strsim", ] [[package]] @@ -895,20 +899,20 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] name = "clap_lex" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "colorchoice" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" 
[[package]] name = "colored" @@ -922,9 +926,9 @@ dependencies = [ [[package]] name = "command" -version = "1.13.4" +version = "1.14.0-rc1" dependencies = [ - "monitor_client", + "komodo_client", "run_command", ] @@ -946,15 +950,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" dependencies = [ "libc", ] @@ -980,9 +984,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crypto-common" @@ -996,72 +1000,37 @@ dependencies = [ [[package]] name = "darling" -version = "0.13.4" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - -[[package]] -name = "darling" -version = "0.20.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" -dependencies = [ - "darling_core 0.20.9", - "darling_macro 0.20.9", + "darling_core", + "darling_macro", ] [[package]] name = "darling_core" -version = "0.13.4" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 1.0.109", -] - -[[package]] -name = "darling_core" -version = "0.20.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.11.1", - "syn 2.0.64", + "strsim", + "syn 2.0.75", ] [[package]] name = "darling_macro" -version = "0.13.4" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ - "darling_core 0.13.4", + "darling_core", "quote", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.20.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" -dependencies = [ - "darling_core 0.20.9", - "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -1093,33 +1062,33 @@ dependencies = [ [[package]] name = "derive_builder" -version = "0.20.0" +version = "0.20.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0350b5cb0331628a5916d6c5c0b72e97393b8b6b03b47a9284f4e7f5a405ffd7" +checksum = "cd33f37ee6a119146a1781d3356a7c26028f83d779b2e04ecd45fdc75c76877b" dependencies = [ "derive_builder_macro", ] [[package]] name = "derive_builder_core" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d48cda787f839151732d396ac69e3473923d54312c070ee21e9effcaa8ca0b1d" +checksum = "7431fa049613920234f22c47fdc33e6cf3ee83067091ea4277a3f8c4587aae38" dependencies = [ - "darling 0.20.9", + "darling", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] name = "derive_builder_macro" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b" +checksum = "4abae7035bf79b9877b779505d8cf3749285b80c43941eda66604841889451dc" dependencies = [ "derive_builder_core", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -1139,7 +1108,7 @@ checksum = "9e520b61247a9470ec86a98baf2aebae5c6dd0f25f02b1c87cafe45b06c160e7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -1150,20 +1119,20 @@ checksum = "12c90da6aa09bad94e4411461560183be70bb33bc30efb2ce941492f22ed6850" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] name = "derive_more" -version = "0.99.17" +version = "0.99.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "convert_case", "proc-macro2", "quote", - "rustc_version 0.4.0", - "syn 1.0.109", + "rustc_version", + "syn 2.0.75", ] [[package]] @@ -1184,7 +1153,7 @@ checksum = "1bceb8b4ad480f8cf02ae4efb42c95add230544b4239d543cbd9f9141d838581" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -1212,9 +1181,9 @@ checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" [[package]] name = "either" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "encoding_rs" @@ -1227,14 +1196,14 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.4.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" +checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.75", ] [[package]] @@ -1268,12 +1237,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" -[[package]] -name = "finl_unicode" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" - [[package]] name = "fnv" version = "1.0.7" @@ -1306,7 +1269,7 @@ dependencies = [ [[package]] name = "formatting" -version = "1.13.4" +version = "1.14.0-rc1" dependencies = [ "serror", ] @@ -1373,7 +1336,7 @@ checksum = 
"87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -1413,7 +1376,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", - "version_check 0.9.4", + "version_check 0.9.5", ] [[package]] @@ -1431,18 +1394,18 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "git" -version = "1.13.4" +version = "1.14.0-rc1" dependencies = [ "anyhow", "command", "formatting", - "monitor_client", + "komodo_client", "run_command", "svi", "tokio", @@ -1467,7 +1430,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.2.6", + "indexmap 2.4.0", "slab", "tokio", "tokio-util", @@ -1476,9 +1439,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", @@ -1486,7 +1449,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.2.6", + "indexmap 2.4.0", "slab", "tokio", "tokio-util", @@ -1562,6 +1525,51 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hickory-proto" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.4.0", + "ipnet", + "once_cell", + "rand", + "thiserror", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot 0.12.3", + "rand", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "hmac" version = "0.12.1" @@ -1617,9 +1625,9 @@ dependencies = [ [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http 1.1.0", @@ -1627,14 +1635,14 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", - "futures-core", + "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", 
"pin-project-lite", ] @@ -1646,9 +1654,9 @@ checksum = "08a397c49fec283e3d6211adbe480be95aae5f304cfb923e9970e08956d5168a" [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -1658,9 +1666,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.28" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", @@ -1689,9 +1697,9 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "httparse", "httpdate", "itoa", @@ -1724,7 +1732,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.28", + "hyper 0.14.30", "log", "rustls 0.21.12", "rustls-native-certs", @@ -1742,7 +1750,7 @@ dependencies = [ "http 1.1.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.10", + "rustls 0.23.12", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -1769,7 +1777,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.28", + "hyper 0.14.30", "native-tls", "tokio", "tokio-native-tls", @@ -1793,15 +1801,15 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" +checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "hyper 1.4.1", "pin-project-lite", "socket2", @@ -1857,11 +1865,10 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.2.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] @@ -1889,9 +1896,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.6" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" dependencies = [ "equivalent", "hashbrown 0.14.5", @@ -1928,7 +1935,7 @@ dependencies = [ "socket2", "widestring", "windows-sys 0.48.0", - "winreg 0.50.0", + "winreg", ] [[package]] @@ -1939,15 +1946,15 @@ checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is_terminal_polyfill" -version = "1.70.0" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ "either", ] @@ -1960,9 +1967,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] @@ -1996,17 +2003,151 @@ dependencies = [ "sha2", ] +[[package]] +name = "komodo_cli" +version = "1.14.0-rc1" +dependencies = [ + "anyhow", + "clap", + "colored", + "futures", + "komodo_client", + "merge_config_files", + "serde", + "tokio", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "komodo_client" +version = "1.14.0-rc1" +dependencies = [ + "anyhow", + "async_timing_util", + "bson", + "clap", + "derive_builder", + "derive_default_builder", + "derive_empty_traits", + "derive_variants", + "envy", + "futures", + "mongo_indexed", + "partial_derive2", + "reqwest 0.12.7", + "resolver_api", + "serde", + "serde_json", + "serror", + "strum 0.26.3", + "thiserror", + "tokio", + "tokio-tungstenite 0.23.1", + "tokio-util", + "tracing", + "typeshare", + "uuid", +] + +[[package]] +name = "komodo_core" +version = "1.14.0-rc1" +dependencies = [ + "anyhow", + "async_timing_util", + "aws-config", + "aws-sdk-ec2", + "aws-sdk-ecr", + "axum", + "axum-extra", + "base64 0.22.1", + "bcrypt", + "derive_variants", + "dotenvy", + "envy", + "formatting", + "futures", + "git", + "hex", + "hmac", + "jwt", + "komodo_client", + "logger", + "merge_config_files", + "mongo_indexed", + "mungos", + "nom_pem", + "octorust", + "ordered_hash_map", + "partial_derive2", + "periphery_client", + "rand", + "regex", + "reqwest 0.12.7", + "resolver_api", + "run_command", + "serde", + "serde_json", + "serde_yaml", + "serror", + "sha2", + "slack_client_rs", + "svi", + "tokio", + "tokio-util", + "toml", + "toml_pretty", + "tower-http", + "tracing", + "typeshare", + "urlencoding", + "uuid", +] + +[[package]] +name = "komodo_periphery" +version = "1.14.0-rc1" +dependencies = [ + "anyhow", + "async_timing_util", + "axum", + "axum-extra", + "bollard", + "clap", + "command", + "dotenvy", + "envy", + "formatting", + "futures", + "git", + "komodo_client", + "logger", + "merge_config_files", + "periphery_client", + "resolver_api", + "run_command", + "serde", + "serde_json", + "serror", + "svi", + "sysinfo", + "tokio", + "tracing", + "uuid", +] + [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.155" +version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = 
"linked-hash-map" @@ -2032,19 +2173,19 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" dependencies = [ "serde", ] [[package]] name = "logger" -version = "1.13.4" +version = "1.14.0-rc1" dependencies = [ "anyhow", - "monitor_client", + "komodo_client", "opentelemetry", "opentelemetry-otlp", "opentelemetry-semantic-conventions", @@ -2069,12 +2210,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" -[[package]] -name = "matches" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" - [[package]] name = "matchit" version = "0.7.3" @@ -2093,9 +2228,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.2" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "merge_config_files" @@ -2111,14 +2246,13 @@ dependencies = [ [[package]] name = "migrator" -version = "1.13.4" +version = "1.14.0-rc1" dependencies = [ "anyhow", - "chrono", "dotenvy", "envy", + "komodo_client", "logger", - "monitor_client", "mungos", "serde", "tokio", @@ -2133,9 +2267,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.4" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" dependencies = [ "mime", "unicase", @@ -2143,18 +2277,18 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" dependencies = [ "adler", ] [[package]] name = "mio" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi", "libc", @@ -2182,14 +2316,14 @@ checksum = "d4f3a4215a0cb95aea5fe33a77d38f0e0f7e7b9bf315bede04e0ea0c46fb704a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] name = "mongodb" -version = "3.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb4af699cbb12a221e978134999b16cbf030d964c3a7cd6c4af2584734190e58" +checksum = "c857d71f918b38221baf2fdff7207fec9984b4504901544772b1edf0302d669f" dependencies = [ "async-trait", "base64 0.13.1", @@ -2203,6 +2337,8 @@ dependencies = [ "futures-io", "futures-util", "hex", + "hickory-proto", + "hickory-resolver", "hmac", "md-5", "mongodb-internal-macros", @@ -2215,19 +2351,17 @@ dependencies = [ "rustls-pemfile 1.0.4", "serde", "serde_bytes", - "serde_with 1.14.0", + 
"serde_with", "sha-1", "sha2", "socket2", "stringprep", - "strsim 0.10.0", + "strsim", "take_mut", "thiserror", "tokio", "tokio-rustls 0.24.1", "tokio-util", - "trust-dns-proto", - "trust-dns-resolver", "typed-builder", "uuid", "webpki-roots", @@ -2235,155 +2369,20 @@ dependencies = [ [[package]] name = "mongodb-internal-macros" -version = "3.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1159486b5e1993138c385369c3c46f5608cb0b1c53734e503538366f3c4cedcc" +checksum = "3a6dbc533e93429a71c44a14c04547ac783b56d3f22e6c4f12b1b994cf93844e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", -] - -[[package]] -name = "monitor_cli" -version = "1.13.4" -dependencies = [ - "anyhow", - "clap", - "colored", - "futures", - "merge_config_files", - "monitor_client", - "serde", - "tokio", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "monitor_client" -version = "1.13.4" -dependencies = [ - "anyhow", - "async_timing_util", - "bollard", - "bson", - "clap", - "derive_builder", - "derive_default_builder", - "derive_empty_traits", - "derive_variants", - "envy", - "futures", - "mongo_indexed", - "partial_derive2", - "reqwest 0.12.5", - "resolver_api", - "serde", - "serde_json", - "serror", - "strum 0.26.3", - "thiserror", - "tokio", - "tokio-tungstenite 0.23.1", - "tokio-util", - "tracing", - "typeshare", - "uuid", -] - -[[package]] -name = "monitor_core" -version = "1.13.4" -dependencies = [ - "anyhow", - "async_timing_util", - "aws-config", - "aws-sdk-ec2", - "aws-sdk-ecr", - "axum", - "axum-extra", - "base64 0.22.1", - "bcrypt", - "derive_variants", - "dotenvy", - "envy", - "formatting", - "futures", - "git", - "hex", - "hmac", - "jwt", - "logger", - "merge_config_files", - "mongo_indexed", - "monitor_client", - "mungos", - "nom_pem", - "octorust", - "ordered_hash_map", - "partial_derive2", - "periphery_client", - "rand", - "regex", - "reqwest 0.12.5", - "resolver_api", - "run_command", - "serde", - "serde_json", - "serde_yaml", - "serror", - "sha2", - "slack_client_rs", - "svi", - "tokio", - "tokio-util", - "toml", - "toml_pretty", - "tower-http", - "tracing", - "typeshare", - "urlencoding", - "uuid", -] - -[[package]] -name = "monitor_periphery" -version = "1.13.4" -dependencies = [ - "anyhow", - "async_timing_util", - "axum", - "axum-extra", - "bollard", - "clap", - "command", - "dotenvy", - "envy", - "formatting", - "futures", - "git", - "logger", - "merge_config_files", - "monitor_client", - "periphery_client", - "resolver_api", - "run_command", - "serde", - "serde_json", - "serror", - "svi", - "sysinfo", - "tokio", - "tracing", - "uuid", + "syn 2.0.75", ] [[package]] name = "mungos" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59e4fbd27eaabaeb49c1d6260dee7dad26b26371ca3faec73c83406a506c1dc0" +checksum = "b2b0a35310ff16234743e7bc2292c19a537a492c1d7449b1e0254a9ec7db82c6" dependencies = [ "anyhow", "envy", @@ -2395,11 +2394,10 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" dependencies = [ - "lazy_static", "libc", "log", "openssl", @@ -2485,9 +2483,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.2" +version = "0.36.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" dependencies = [ "memchr", ] @@ -2533,11 +2531,11 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "openssl" -version = "0.10.64" +version = "0.10.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "cfg-if", "foreign-types", "libc", @@ -2554,7 +2552,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -2565,9 +2563,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.102" +version = "0.9.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" dependencies = [ "cc", "libc", @@ -2681,9 +2679,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core 0.9.10", @@ -2711,9 +2709,9 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.1", + "redox_syscall 0.5.3", "smallvec", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -2744,7 +2742,7 @@ checksum = "3a506f66d52e40b2385d7b9f776fd5243d6cff16ba79147f859aa4e27d2d27cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -2773,11 +2771,11 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "periphery_client" -version = "1.13.4" +version = "1.14.0-rc1" dependencies = [ "anyhow", - "monitor_client", - "reqwest 0.12.5", + "komodo_client", + "reqwest 0.12.7", "resolver_api", "serde", "serde_json", @@ -2802,7 +2800,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -2831,15 +2829,18 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "proc-macro2" -version = "1.0.82" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] @@ -2864,7 +2865,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -2949,11 
+2950,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" +checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", ] [[package]] @@ -2981,9 +2982,9 @@ dependencies = [ [[package]] name = "regex-lite" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b661b2f27137bdbc16f00eda72866a92bb28af1753ffbd56744fb6e2e9cd8e" +checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" [[package]] name = "regex-syntax" @@ -3005,7 +3006,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.28", + "hyper 0.14.30", "hyper-rustls 0.24.2", "hyper-tls 0.5.0", "ipnet", @@ -3023,7 +3024,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "sync_wrapper 0.1.2", - "system-configuration", + "system-configuration 0.5.1", "tokio", "tokio-native-tls", "tokio-rustls 0.24.1", @@ -3033,23 +3034,23 @@ dependencies = [ "wasm-bindgen-futures", "web-sys", "webpki-roots", - "winreg 0.50.0", + "winreg", ] [[package]] name = "reqwest" -version = "0.12.5" +version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" dependencies = [ "base64 0.22.1", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "hyper 1.4.1", "hyper-rustls 0.27.2", @@ -3063,12 +3064,12 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile 2.1.2", + "rustls-pemfile 2.1.3", "serde", "serde_json", "serde_urlencoded", "sync_wrapper 1.0.1", - "system-configuration", + "system-configuration 0.6.0", "tokio", "tokio-native-tls", "tower-service", @@ -3076,7 +3077,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg 0.52.0", + "windows-registry", ] [[package]] @@ -3118,7 +3119,7 @@ dependencies = [ "futures", "getrandom", "http 0.2.12", - "hyper 0.14.28", + "hyper 0.14.30", "parking_lot 0.11.2", "reqwest 0.11.27", "reqwest-middleware", @@ -3176,7 +3177,7 @@ checksum = "e35ef0d79735ffcdf944895605c107b7f04ea097ce5a39d3191f9b2d82f73f7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -3235,32 +3236,23 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver 0.9.0", -] - [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.23", + "semver", ] [[package]] name = "rustc_version_runtime" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d31b7153270ebf48bf91c65ae5b0c00e749c4cfad505f66530ac74950249582f" +checksum = 
"2dd18cd2bae1820af0b6ad5e54f4a51d0f3fcc53b05f845675074efcc7af071d" dependencies = [ - "rustc_version 0.2.3", - "semver 0.9.0", + "rustc_version", + "semver", ] [[package]] @@ -3269,7 +3261,7 @@ version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", @@ -3290,13 +3282,13 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" dependencies = [ "once_cell", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.6", "subtle", "zeroize", ] @@ -3324,9 +3316,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.2" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ "base64 0.22.1", "rustls-pki-types", @@ -3334,9 +3326,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-webpki" @@ -3350,9 +3342,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -3405,7 +3397,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -3426,11 +3418,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -3439,62 +3431,47 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" dependencies = [ "core-foundation-sys", "libc", ] -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - [[package]] name = "semver" version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" -[[package]] -name = "semver-parser" -version = "0.7.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "serde" -version = "1.0.208" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" +checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" dependencies = [ "serde_derive", ] [[package]] name = "serde_bytes" -version = "0.11.14" +version = "0.11.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734" +checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.208" +version = "1.0.209" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" +checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -3505,16 +3482,16 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] name = "serde_json" -version = "1.0.125" +version = "1.0.127" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" +checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.4.0", "itoa", "memchr", "ryu", @@ -3539,7 +3516,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -3565,41 +3542,32 @@ dependencies = [ [[package]] name = "serde_with" -version = "1.14.0" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" -dependencies = [ - "serde", - "serde_with_macros", -] - -[[package]] -name = "serde_with" -version = "3.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" +checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.6", + "indexmap 2.4.0", "serde", "serde_derive", "serde_json", + "serde_with_macros", "time", ] [[package]] name = "serde_with_macros" -version = "1.5.2" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" +checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" dependencies = [ - "darling 0.13.4", + "darling", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.75", ] [[package]] @@ -3608,7 +3576,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.4.0", "itoa", "ryu", "serde", @@ -3669,6 +3637,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -3744,21 +3718,15 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "stringprep" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" dependencies = [ - "finl_unicode", "unicode-bidi", "unicode-normalization", + "unicode-properties", ] -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "strsim" version = "0.11.1" @@ -3790,7 +3758,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -3803,14 +3771,14 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "svi" @@ -3834,9 +3802,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.64" +version = "2.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ad3dee41f36859875573074334c200d1add8e4a87bb37113ebd31d926b7b11f" +checksum = "f6af063034fc1935ede7be0122941bafa9bacb949334d090b77ca98b5817c7d9" dependencies = [ "proc-macro2", "quote", @@ -3854,12 +3822,15 @@ name = "sync_wrapper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] [[package]] name = "sysinfo" -version = "0.31.2" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4115055da5f572fff541dd0c4e61b0262977f453cc9fe04be83aba25a89bdab" +checksum = "355dbe4f8799b304b05e1b0f05fc59b2a18d36645cf169607da45bde2f69a1be" dependencies = [ "core-foundation-sys", "libc", @@ -3877,7 +3848,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658bc6ee10a9b4fcf576e9b0819d95ec16f4d2c02d39fd83ac1c8789785c4a42" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "system-configuration-sys 0.6.0", ] [[package]] @@ -3890,6 +3872,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "take_mut" version = "0.2.2" @@ -3913,14 +3905,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", "fastrand", + "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3940,7 +3933,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -3986,9 +3979,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -4001,15 +3994,15 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.2" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", "libc", "mio", - "parking_lot 0.12.2", + "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", "socket2", @@ -4025,7 +4018,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -4054,7 +4047,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.10", + "rustls 0.23.12", "rustls-pki-types", "tokio", ] @@ -4135,7 +4128,7 @@ version = "0.22.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.4.0", "serde", "serde_spanned", "toml_datetime", @@ -4165,9 +4158,9 @@ dependencies = [ "axum", "base64 0.22.1", "bytes", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "hyper 1.4.1", "hyper-timeout", @@ -4210,11 +4203,11 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "http-range-header", "httpdate", @@ -4231,15 +4224,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -4261,7 +4254,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ 
-4330,51 +4323,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "trust-dns-proto" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c31f240f59877c3d4bb3b3ea0ec5a6a0cff07323580ff8c7a605cd7d08b255d" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.2.3", - "ipnet", - "lazy_static", - "log", - "rand", - "smallvec", - "thiserror", - "tinyvec", - "tokio", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.21.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ba72c2ea84515690c9fcef4c6c660bb9df3036ed1051686de84605b74fd558" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lazy_static", - "log", - "lru-cache", - "parking_lot 0.12.2", - "resolv-conf", - "smallvec", - "thiserror", - "tokio", - "trust-dns-proto", -] - [[package]] name = "try-lock" version = "0.2.5" @@ -4454,7 +4402,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a615d6c2764852a2e88a4f16e9ce1ea49bb776b5872956309e170d63a042a34f" dependencies = [ "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] @@ -4463,7 +4411,7 @@ version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" dependencies = [ - "version_check 0.9.4", + "version_check 0.9.5", ] [[package]] @@ -4487,6 +4435,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-properties" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" + [[package]] name = "unsafe-libyaml" version = "0.2.11" @@ -4507,20 +4461,20 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "update_logger" -version = "1.13.4" +version = "1.14.0-rc1" dependencies = [ "anyhow", + "komodo_client", "logger", - "monitor_client", "tokio", "tracing", ] [[package]] name = "url" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna 0.5.0", @@ -4542,9 +4496,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" @@ -4577,9 +4531,9 @@ checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "vsimd" @@ -4604,34 +4558,35 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ "cfg-if", "js-sys", @@ -4641,9 +4596,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4651,22 +4606,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "wasm-timer" @@ -4685,9 +4640,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ "js-sys", "wasm-bindgen", @@ -4744,7 +4699,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" dependencies = [ "windows-core 0.57.0", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -4753,7 +4708,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -4764,8 +4719,8 @@ checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" dependencies = [ "windows-implement", "windows-interface", - "windows-result", - "windows-targets 0.52.5", + "windows-result 0.1.2", + "windows-targets 0.52.6", ] [[package]] @@ -4776,7 +4731,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 
2.0.75", ] [[package]] @@ -4787,7 +4742,18 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", +] + +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result 0.2.0", + "windows-strings", + "windows-targets 0.52.6", ] [[package]] @@ -4796,7 +4762,26 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result 0.2.0", + "windows-targets 0.52.6", ] [[package]] @@ -4814,7 +4799,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -4834,18 +4828,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -4856,9 +4850,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -4868,9 +4862,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] 
name = "windows_i686_gnu" @@ -4880,15 +4874,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -4898,9 +4892,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -4910,9 +4904,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -4922,9 +4916,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -4934,9 +4928,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" @@ -4957,16 +4951,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "winreg" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "wyz" version = "0.5.1" @@ -4984,26 +4968,27 @@ checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" [[package]] name = "zerocopy" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ + "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.64", + "syn 2.0.75", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" diff --git a/Cargo.toml b/Cargo.toml index 3fcdf3f59..dcf939237 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,19 +3,20 @@ resolver = "2" members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"] [workspace.package] -version = "1.13.4" +version = "1.14.0-rc1" edition = "2021" authors = ["mbecker20 "] license = "GPL-3.0-or-later" -repository = "https://github.com/mbecker20/monitor" -homepage = "https://docs.monitor.dev" +repository = "https://github.com/mbecker20/komodo" +homepage = "https://komo.do" [patch.crates-io] -monitor_client = { path = "client/core/rs" } +# komodo_client = { path = "client/core/rs" } [workspace.dependencies] # LOCAL -monitor_client = "1.13.3" +# komodo_client = "1.14.0" +komodo_client = { path = "client/core/rs" } periphery_client = { path = "client/periphery/rs" } formatting = { path = "lib/formatting" } command = { path = "lib/command" } @@ -35,12 +36,12 @@ derive_variants = "1.0.0" mongo_indexed = "2.0.1" resolver_api = "1.1.1" toml_pretty = "1.1.2" -mungos = "1.0.1" +mungos = "1.1.0" svi = "1.0.1" # ASYNC -tokio = { version = "1.39.2", features = ["full"] } -reqwest = { version = "0.12.5", features = ["json"] } +tokio = { version = "1.40.0", features = ["full"] } +reqwest = { version = "0.12.7", features = ["json"] } tokio-util = "0.7.11" futures = "0.3.30" futures-util = "0.3.30" @@ -53,9 +54,9 @@ tokio-tungstenite = "0.23.1" # SER/DE ordered_hash_map = { version = "0.4.0", features = ["serde"] } -serde = { version = "1.0.208", features = ["derive"] } +serde = { version = "1.0.209", features = ["derive"] } strum = { version = "0.26.3", features = ["derive"] } -serde_json = "1.0.125" +serde_json = "1.0.127" serde_yaml = "0.9.34" toml = "0.8.19" @@ -90,16 +91,16 @@ jwt = "0.16.0" hex = "0.4.3" # SYSTEM -bollard = "0.17.0" -sysinfo = "0.31.2" +bollard = "0.17.1" +sysinfo = "0.31.4" # CLOUD aws-config = "1.5.5" -aws-sdk-ec2 = "1.66.0" -aws-sdk-ecr = "1.40.0" +aws-sdk-ec2 = "1.70.0" +aws-sdk-ecr = "1.42.0" # MISC -derive_builder = "0.20.0" +derive_builder = "0.20.1" typeshare = "1.0.3" octorust = "0.7.0" colored = "2.1.0" diff --git a/bin/alerter/Cargo.toml b/bin/alerter/Cargo.toml index 0f03a8c46..37f92dffc 100644 --- a/bin/alerter/Cargo.toml +++ b/bin/alerter/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true [dependencies] # local -monitor_client.workspace = true +komodo_client.workspace = true logger.workspace = true # external tokio.workspace = true diff --git a/bin/alerter/Dockerfile b/bin/alerter/Dockerfile index 14441ab07..f920938c9 100644 --- a/bin/alerter/Dockerfile +++ b/bin/alerter/Dockerfile @@ -1,11 +1,11 @@ -FROM rust:1.71.1 as builder +FROM rust:1.80.1 as builder WORKDIR /builder COPY . . 
RUN cargo build -p alert_logger --release -FROM gcr.io/distroless/cc +FROM gcr.io/distroless/debian-cc COPY --from=builder /builder/target/release/alert_logger / diff --git a/bin/alerter/README.md b/bin/alerter/README.md index 3bec27a43..dba4c8fb2 100644 --- a/bin/alerter/README.md +++ b/bin/alerter/README.md @@ -1,4 +1,4 @@ # Alerter This crate sets up a basic axum server that listens for incoming alert POSTs. -It can be used as a monitor alerting endpoint, and serves as a template for other custom alerter implementations. \ No newline at end of file +It can be used as a Komodo alerting endpoint, and serves as a template for other custom alerter implementations. \ No newline at end of file diff --git a/bin/alerter/src/main.rs b/bin/alerter/src/main.rs index a28f4b1a2..0ee1872bf 100644 --- a/bin/alerter/src/main.rs +++ b/bin/alerter/src/main.rs @@ -5,9 +5,7 @@ use std::{net::SocketAddr, str::FromStr}; use anyhow::Context; use axum::{routing::post, Json, Router}; -use monitor_client::entities::{ - alert::Alert, server::stats::SeverityLevel, -}; +use komodo_client::entities::alert::{Alert, SeverityLevel}; use serde::Deserialize; #[derive(Deserialize)] diff --git a/bin/cli/Cargo.toml b/bin/cli/Cargo.toml index 8f4143ce9..f3f3dd67e 100644 --- a/bin/cli/Cargo.toml +++ b/bin/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "monitor_cli" -description = "Command line tool to sync monitor resources and execute file defined procedures" +name = "komodo_cli" +description = "Command line tool to execute Komodo actions" version.workspace = true edition.workspace = true authors.workspace = true @@ -9,14 +9,14 @@ homepage.workspace = true repository.workspace = true [[bin]] -name = "monitor" +name = "komodo" path = "src/main.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] # local -monitor_client.workspace = true +komodo_client.workspace = true # external tracing-subscriber.workspace = true merge_config_files.workspace = true diff --git a/bin/cli/README.md b/bin/cli/README.md index 8d9059eec..160589d9d 100644 --- a/bin/cli/README.md +++ b/bin/cli/README.md @@ -1,11 +1,11 @@ -# Monitor CLI +# Komodo CLI -Monitor CLI is a tool to sync monitor resources and execute operations. +Komodo CLI is a tool to execute actions on your Komodo instance from shell scripts. ## Install ```sh -cargo install monitor_cli +cargo install komodo_cli ``` Note: On Ubuntu, also requires `apt install build-essential pkg-config libssl-dev`. @@ -14,9 +14,9 @@ Note: On Ubuntu, also requires `apt install build-essential pkg-config libssl-de ### Credentials -Configure a file `~/.config/monitor/creds.toml` file with contents: +Configure a file `~/.config/komodo/creds.toml` file with contents: ```toml -url = "https://your.monitor.address" +url = "https://your.komodo.address" key = "YOUR-API-KEY" secret = "YOUR-API-SECRET" ``` @@ -25,21 +25,21 @@ Note. You can specify a different creds file by using `--creds ./other/path.toml You can also bypass using any file and pass the information using `--url`, `--key`, `--secret`: ```sh -monitor --url "https://your.monitor.address" --key "YOUR-API-KEY" --secret "YOUR-API-SECRET" ... +komodo --url "https://your.komodo.address" --key "YOUR-API-KEY" --secret "YOUR-API-SECRET" ... ``` ### Run Executions ```sh # Triggers an example build -monitor execute run-build test_build +komodo execute run-build test_build ``` #### Manual ```md Runs an execution -Usage: monitor execute +Usage: komodo execute Commands: none The "null" execution. 
Does nothing diff --git a/bin/cli/src/args.rs b/bin/cli/src/args.rs index 73697096a..01532df29 100644 --- a/bin/cli/src/args.rs +++ b/bin/cli/src/args.rs @@ -1,5 +1,5 @@ use clap::{Parser, Subcommand}; -use monitor_client::api::execute::Execution; +use komodo_client::api::execute::Execution; use serde::Deserialize; #[derive(Parser, Debug)] @@ -34,7 +34,7 @@ pub struct CliArgs { fn default_creds() -> String { let home = std::env::var("HOME").unwrap_or_else(|_| String::from("/root")); - format!("{home}/.config/monitor/creds.toml") + format!("{home}/.config/komodo/creds.toml") } #[derive(Debug, Clone, Subcommand)] diff --git a/bin/cli/src/exec.rs b/bin/cli/src/exec.rs index 78faa08a3..5ae4e2700 100644 --- a/bin/cli/src/exec.rs +++ b/bin/cli/src/exec.rs @@ -1,11 +1,11 @@ use std::time::Duration; use colored::Colorize; -use monitor_client::api::execute::Execution; +use komodo_client::api::execute::Execution; use crate::{ helpers::wait_for_enter, - state::{cli_args, monitor_client}, + state::{cli_args, komodo_client}, }; pub async fn run(execution: Execution) -> anyhow::Result<()> { @@ -33,6 +33,36 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> { Execution::Deploy(data) => { println!("{}: {data:?}", "Data".dimmed()) } + Execution::StartDeployment(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::RestartDeployment(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::PauseDeployment(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::UnpauseDeployment(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::StopDeployment(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::DestroyDeployment(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::CloneRepo(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::PullRepo(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::BuildRepo(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::CancelRepoBuild(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } Execution::StartContainer(data) => { println!("{}: {data:?}", "Data".dimmed()) } @@ -48,31 +78,46 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> { Execution::StopContainer(data) => { println!("{}: {data:?}", "Data".dimmed()) } + Execution::DestroyContainer(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::StartAllContainers(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::RestartAllContainers(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::PauseAllContainers(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::UnpauseAllContainers(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } Execution::StopAllContainers(data) => { println!("{}: {data:?}", "Data".dimmed()) } - Execution::RemoveContainer(data) => { + Execution::PruneContainers(data) => { println!("{}: {data:?}", "Data".dimmed()) } - Execution::CloneRepo(data) => { - println!("{}: {data:?}", "Data".dimmed()) - } - Execution::PullRepo(data) => { - println!("{}: {data:?}", "Data".dimmed()) - } - Execution::BuildRepo(data) => { - println!("{}: {data:?}", "Data".dimmed()) - } - Execution::CancelRepoBuild(data) => { + Execution::DeleteNetwork(data) => { println!("{}: {data:?}", "Data".dimmed()) } Execution::PruneNetworks(data) => { println!("{}: {data:?}", "Data".dimmed()) } + Execution::DeleteImage(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } 
Execution::PruneImages(data) => { println!("{}: {data:?}", "Data".dimmed()) } - Execution::PruneContainers(data) => { + Execution::DeleteVolume(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::PruneVolumes(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::PruneSystem(data) => { println!("{}: {data:?}", "Data".dimmed()) } Execution::RunSync(data) => { @@ -112,82 +157,127 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> { let res = match execution { Execution::RunProcedure(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::RunBuild(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::CancelBuild(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::Deploy(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } - Execution::StartContainer(request) => { - monitor_client().execute(request).await + Execution::StartDeployment(request) => { + komodo_client().execute(request).await } - Execution::RestartContainer(request) => { - monitor_client().execute(request).await + Execution::RestartDeployment(request) => { + komodo_client().execute(request).await } - Execution::PauseContainer(request) => { - monitor_client().execute(request).await + Execution::PauseDeployment(request) => { + komodo_client().execute(request).await } - Execution::UnpauseContainer(request) => { - monitor_client().execute(request).await + Execution::UnpauseDeployment(request) => { + komodo_client().execute(request).await } - Execution::StopContainer(request) => { - monitor_client().execute(request).await + Execution::StopDeployment(request) => { + komodo_client().execute(request).await } - Execution::StopAllContainers(request) => { - monitor_client().execute(request).await - } - Execution::RemoveContainer(request) => { - monitor_client().execute(request).await + Execution::DestroyDeployment(request) => { + komodo_client().execute(request).await } Execution::CloneRepo(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::PullRepo(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::BuildRepo(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::CancelRepoBuild(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } - Execution::PruneNetworks(request) => { - monitor_client().execute(request).await + Execution::StartContainer(request) => { + komodo_client().execute(request).await } - Execution::PruneImages(request) => { - monitor_client().execute(request).await + Execution::RestartContainer(request) => { + komodo_client().execute(request).await + } + Execution::PauseContainer(request) => { + komodo_client().execute(request).await + } + Execution::UnpauseContainer(request) => { + komodo_client().execute(request).await + } + Execution::StopContainer(request) => { + komodo_client().execute(request).await + } + Execution::DestroyContainer(request) => { + komodo_client().execute(request).await + } + Execution::StartAllContainers(request) => { + komodo_client().execute(request).await + } + Execution::RestartAllContainers(request) => { + komodo_client().execute(request).await + } + Execution::PauseAllContainers(request) => { + 
komodo_client().execute(request).await + } + Execution::UnpauseAllContainers(request) => { + komodo_client().execute(request).await + } + Execution::StopAllContainers(request) => { + komodo_client().execute(request).await } Execution::PruneContainers(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await + } + Execution::DeleteNetwork(request) => { + komodo_client().execute(request).await + } + Execution::PruneNetworks(request) => { + komodo_client().execute(request).await + } + Execution::DeleteImage(request) => { + komodo_client().execute(request).await + } + Execution::PruneImages(request) => { + komodo_client().execute(request).await + } + Execution::DeleteVolume(request) => { + komodo_client().execute(request).await + } + Execution::PruneVolumes(request) => { + komodo_client().execute(request).await + } + Execution::PruneSystem(request) => { + komodo_client().execute(request).await } Execution::RunSync(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::DeployStack(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::StartStack(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::RestartStack(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::PauseStack(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::UnpauseStack(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::StopStack(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::DestroyStack(request) => { - monitor_client().execute(request).await + komodo_client().execute(request).await } Execution::Sleep(request) => { let duration = diff --git a/bin/cli/src/main.rs b/bin/cli/src/main.rs index 62a5a3cbe..4ff563b8f 100644 --- a/bin/cli/src/main.rs +++ b/bin/cli/src/main.rs @@ -2,7 +2,7 @@ extern crate tracing; use colored::Colorize; -use monitor_client::api::read::GetVersion; +use komodo_client::api::read::GetVersion; mod args; mod exec; @@ -13,9 +13,14 @@ mod state; async fn main() -> anyhow::Result<()> { tracing_subscriber::fmt().with_target(false).init(); + info!( + "Komodo CLI version: {}", + env!("CARGO_PKG_VERSION").blue().bold() + ); + let version = - state::monitor_client().read(GetVersion {}).await?.version; - info!("monitor version: {}", version.to_string().blue().bold()); + state::komodo_client().read(GetVersion {}).await?.version; + info!("Komodo Core version: {}", version.blue().bold()); match &state::cli_args().command { args::Command::Execute { execution } => { diff --git a/bin/cli/src/state.rs b/bin/cli/src/state.rs index ed42b8ec8..5ef67bb3c 100644 --- a/bin/cli/src/state.rs +++ b/bin/cli/src/state.rs @@ -1,17 +1,17 @@ use std::sync::OnceLock; use clap::Parser; +use komodo_client::KomodoClient; use merge_config_files::parse_config_file; -use monitor_client::MonitorClient; pub fn cli_args() -> &'static crate::args::CliArgs { static CLI_ARGS: OnceLock = OnceLock::new(); CLI_ARGS.get_or_init(crate::args::CliArgs::parse) } -pub fn monitor_client() -> &'static MonitorClient { - static MONITOR_CLIENT: OnceLock = OnceLock::new(); - MONITOR_CLIENT.get_or_init(|| { +pub fn komodo_client() -> &'static KomodoClient { + static KOMODO_CLIENT: OnceLock = OnceLock::new(); + 
KOMODO_CLIENT.get_or_init(|| { let args = cli_args(); let crate::args::CredsFile { url, key, secret } = match (&args.url, &args.key, &args.secret) { @@ -25,7 +25,7 @@ pub fn monitor_client() -> &'static MonitorClient { (url, key, secret) => { let mut creds: crate::args::CredsFile = parse_config_file(cli_args().creds.as_str()) - .expect("failed to parse monitor credentials"); + .expect("failed to parse Komodo credentials"); if let Some(url) = url { creds.url.clone_from(url); @@ -40,7 +40,7 @@ pub fn monitor_client() -> &'static MonitorClient { creds } }; - futures::executor::block_on(MonitorClient::new(url, key, secret)) - .expect("failed to initialize monitor client") + futures::executor::block_on(KomodoClient::new(url, key, secret)) + .expect("failed to initialize Komodo client") }) } diff --git a/bin/core/Cargo.toml b/bin/core/Cargo.toml index a561ea899..58f004828 100644 --- a/bin/core/Cargo.toml +++ b/bin/core/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "monitor_core" +name = "komodo_core" version.workspace = true edition.workspace = true authors.workspace = true @@ -15,7 +15,7 @@ path = "src/main.rs" [dependencies] # local -monitor_client = { workspace = true, features = ["mongo"] } +komodo_client = { workspace = true, features = ["mongo"] } periphery_client.workspace = true formatting.workspace = true logger.workspace = true diff --git a/bin/core/Dockerfile b/bin/core/Dockerfile index a6933ce37..908ff33b4 100644 --- a/bin/core/Dockerfile +++ b/bin/core/Dockerfile @@ -2,7 +2,7 @@ FROM rust:1.80.1-bookworm AS core-builder WORKDIR /builder COPY . . -RUN cargo build -p monitor_core --release +RUN cargo build -p komodo_core --release # Build Frontend FROM node:20.12-alpine AS frontend-builder @@ -10,7 +10,7 @@ WORKDIR /builder COPY ./frontend ./frontend COPY ./client/core/ts ./client RUN cd client && yarn && yarn build && yarn link -RUN cd frontend && yarn link @monitor/client && yarn && yarn build +RUN cd frontend && yarn link @komodo/client && yarn && yarn build # Final Image FROM debian:bookworm-slim @@ -32,8 +32,8 @@ COPY --from=frontend-builder /builder/frontend/dist /frontend EXPOSE 9000 # Label for Ghcr -LABEL org.opencontainers.image.source=https://github.com/mbecker20/monitor -LABEL org.opencontainers.image.description="Monitor Core" +LABEL org.opencontainers.image.source=https://github.com/mbecker20/komodo +LABEL org.opencontainers.image.description="Komodo Core" LABEL org.opencontainers.image.licenses=GPL-3.0 CMD ["./core"] \ No newline at end of file diff --git a/bin/core/src/api/auth.rs b/bin/core/src/api/auth.rs index 6ec2a06bd..707379cdd 100644 --- a/bin/core/src/api/auth.rs +++ b/bin/core/src/api/auth.rs @@ -3,7 +3,7 @@ use std::{sync::OnceLock, time::Instant}; use anyhow::anyhow; use axum::{http::HeaderMap, routing::post, Router}; use axum_extra::{headers::ContentType, TypedHeader}; -use monitor_client::{api::auth::*, entities::user::User}; +use komodo_client::{api::auth::*, entities::user::User}; use resolver_api::{derive::Resolver, Resolve, Resolver}; use serde::{Deserialize, Serialize}; use serror::Json; diff --git a/bin/core/src/api/execute/build.rs b/bin/core/src/api/execute/build.rs index 23edd9515..f478e2889 100644 --- a/bin/core/src/api/execute/build.rs +++ b/bin/core/src/api/execute/build.rs @@ -3,19 +3,18 @@ use std::{collections::HashSet, future::IntoFuture, time::Duration}; use anyhow::{anyhow, Context}; use formatting::format_serror; use futures::future::join_all; -use monitor_client::{ +use komodo_client::{ api::execute::{CancelBuild, Deploy, RunBuild}, 
entities::{ - alert::{Alert, AlertData}, + alert::{Alert, AlertData, SeverityLevel}, all_logs_success, build::{Build, ImageRegistry, StandardRegistryConfig}, builder::{Builder, BuilderConfig}, config::core::{AwsEcrConfig, AwsEcrConfigWithCredentials}, deployment::DeploymentState, - monitor_timestamp, + komodo_timestamp, permission::PermissionLevel, - server::stats::SeverityLevel, - to_monitor_name, + to_komodo_name, update::{Log, Update}, user::{auto_redeploy_user, User}, }, @@ -28,7 +27,7 @@ use mungos::{ options::FindOneOptions, }, }; -use periphery_client::api::{self, git::RepoActionResponseV1_13}; +use periphery_client::api; use resolver_api::Resolve; use tokio_util::sync::CancellationToken; @@ -40,16 +39,20 @@ use crate::{ builder::{cleanup_builder_instance, get_builder_periphery}, channel::build_cancel_channel, git_token, - query::{get_deployment_state, get_global_variables}, + interpolate::{ + add_interp_update_log, + interpolate_variables_secrets_into_environment, + interpolate_variables_secrets_into_extra_args, + interpolate_variables_secrets_into_system_command, + }, + query::{get_deployment_state, get_variables_and_secrets}, registry_token, - update::update_update, + update::{init_execution_update, update_update}, }, resource::{self, refresh_build_state_cache}, state::{action_states, db_client, State}, }; -use crate::helpers::update::init_execution_update; - use super::ExecuteRequest; impl Resolve for State { @@ -65,6 +68,7 @@ impl Resolve for State { PermissionLevel::Execute, ) .await?; + let vars_and_secrets = get_variables_and_secrets().await?; if build.config.builder_id.is_empty() { return Err(anyhow!("Must attach builder to RunBuild")); @@ -172,6 +176,29 @@ impl Resolve for State { // CLONE REPO + let secret_replacers = if !build.config.skip_secret_interp { + // Interpolate variables / secrets into pre build command + let mut global_replacers = HashSet::new(); + let mut secret_replacers = HashSet::new(); + + interpolate_variables_secrets_into_system_command( + &vars_and_secrets, + &mut build.config.pre_build, + &mut global_replacers, + &mut secret_replacers, + )?; + + add_interp_update_log( + &mut update, + &global_replacers, + &secret_replacers, + ); + + secret_replacers + } else { + Default::default() + }; + let res = tokio::select! 
{ res = periphery .request(api::git::CloneRepo { @@ -180,6 +207,7 @@ impl Resolve for State { environment: Default::default(), env_file_path: Default::default(), skip_secret_interp: Default::default(), + replacers: secret_replacers.into_iter().collect(), }) => res, _ = cancel.cancelled() => { debug!("build cancelled during clone, cleaning up builder"); @@ -194,7 +222,6 @@ impl Resolve for State { let commit_message = match res { Ok(res) => { debug!("finished repo clone"); - let res: RepoActionResponseV1_13 = res.into(); update.logs.extend(res.logs); update.commit_hash = res.commit_hash.unwrap_or_default().to_string(); @@ -214,90 +241,36 @@ impl Resolve for State { if all_logs_success(&update.logs) { let secret_replacers = if !build.config.skip_secret_interp { - let core_config = core_config(); - let variables = get_global_variables().await?; // Interpolate variables / secrets into build args let mut global_replacers = HashSet::new(); let mut secret_replacers = HashSet::new(); - let mut secret_replacers_for_log = HashSet::new(); - // Interpolate into build args - for arg in &mut build.config.build_args { - // first pass - global variables - let (res, more_replacers) = svi::interpolate_variables( - &arg.value, - &variables, - svi::Interpolator::DoubleBrackets, - false, - ) - .context("failed to interpolate global variables")?; - global_replacers.extend(more_replacers); - // second pass - core secrets - let (res, more_replacers) = svi::interpolate_variables( - &res, - &core_config.secrets, - svi::Interpolator::DoubleBrackets, - false, - ) - .context("failed to interpolate core secrets")?; - secret_replacers_for_log.extend( - more_replacers - .iter() - .map(|(_, variable)| variable.clone()), - ); - secret_replacers.extend(more_replacers); - arg.value = res; - } + interpolate_variables_secrets_into_environment( + &vars_and_secrets, + &mut build.config.build_args, + &mut global_replacers, + &mut secret_replacers, + )?; - // Interpolate into secret args - for arg in &mut build.config.secret_args { - // first pass - global variables - let (res, more_replacers) = svi::interpolate_variables( - &arg.value, - &variables, - svi::Interpolator::DoubleBrackets, - false, - ) - .context("failed to interpolate global variables")?; - global_replacers.extend(more_replacers); - // second pass - core secrets - let (res, more_replacers) = svi::interpolate_variables( - &res, - &core_config.secrets, - svi::Interpolator::DoubleBrackets, - false, - ) - .context("failed to interpolate core secrets")?; - secret_replacers_for_log.extend( - more_replacers.into_iter().map(|(_, variable)| variable), - ); - // Secret args don't need to be in replacers sent to periphery. - // The secret args don't end up in the command like build args do. 
- arg.value = res; - } + interpolate_variables_secrets_into_environment( + &vars_and_secrets, + &mut build.config.secret_args, + &mut global_replacers, + &mut secret_replacers, + )?; - // Show which variables were interpolated - if !global_replacers.is_empty() { - update.push_simple_log( - "interpolate global variables", - global_replacers - .into_iter() - .map(|(value, variable)| format!("{variable} => {value}")) - .collect::>() - .join("\n"), - ); - } + interpolate_variables_secrets_into_extra_args( + &vars_and_secrets, + &mut build.config.extra_args, + &mut global_replacers, + &mut secret_replacers, + )?; - if !secret_replacers_for_log.is_empty() { - update.push_simple_log( - "interpolate core secrets", - secret_replacers_for_log - .into_iter() - .map(|variable| format!("replaced: {variable}")) - .collect::>() - .join("\n"), - ); - } + add_interp_update_log( + &mut update, + &global_replacers, + &secret_replacers, + ); secret_replacers } else { @@ -354,7 +327,7 @@ impl Resolve for State { doc! { "$set": { "config.version": to_bson(&build.config.version) .context("failed at converting version to bson")?, - "info.last_built_at": monitor_timestamp(), + "info.last_built_at": komodo_timestamp(), "info.built_hash": &update.commit_hash, "info.built_message": commit_message }}, @@ -398,8 +371,8 @@ impl Resolve for State { let alert = Alert { id: Default::default(), target, - ts: monitor_timestamp(), - resolved_ts: Some(monitor_timestamp()), + ts: komodo_timestamp(), + resolved_ts: Some(komodo_timestamp()), resolved: true, level: SeverityLevel::Warning, data: AlertData::BuildFailed { @@ -447,8 +420,8 @@ async fn handle_early_return( let alert = Alert { id: Default::default(), target, - ts: monitor_timestamp(), - resolved_ts: Some(monitor_timestamp()), + ts: komodo_timestamp(), + resolved_ts: Some(komodo_timestamp()), resolved: true, level: SeverityLevel::Warning, data: AlertData::BuildFailed { @@ -657,7 +630,7 @@ async fn validate_account_extract_registry_token_aws_ecr( .await .context("failed to get aws ecr token")?; ecr::maybe_create_repo( - &to_monitor_name(&build.name), + &to_komodo_name(&build.name), region.to_string(), access_key_id, secret_access_key, diff --git a/bin/core/src/api/execute/deployment.rs b/bin/core/src/api/execute/deployment.rs index 871c21956..23caa87ea 100644 --- a/bin/core/src/api/execute/deployment.rs +++ b/bin/core/src/api/execute/deployment.rs @@ -1,6 +1,8 @@ +use std::collections::HashSet; + use anyhow::{anyhow, Context}; use formatting::format_serror; -use monitor_client::{ +use komodo_client::{ api::execute::*, entities::{ build::{Build, ImageRegistry}, @@ -10,7 +12,7 @@ use monitor_client::{ }, get_image_name, permission::PermissionLevel, - server::{Server, ServerState}, + server::Server, update::{Log, Update}, user::User, Version, @@ -23,8 +25,15 @@ use crate::{ cloud::aws::ecr, config::core_config, helpers::{ - interpolate_variables_secrets_into_environment, periphery_client, - query::get_server_with_status, registry_token, + interpolate::{ + add_interp_update_log, + interpolate_variables_secrets_into_container_command, + interpolate_variables_secrets_into_environment, + interpolate_variables_secrets_into_extra_args, + }, + periphery_client, + query::get_variables_and_secrets, + registry_token, update::update_update, }, monitor::update_cache_for_server, @@ -47,13 +56,8 @@ async fn setup_deployment_execution( return Err(anyhow!("deployment has no server configured")); } - let (server, status) = - get_server_with_status(&deployment.config.server_id).await?; - if 
status != ServerState::Ok { - return Err(anyhow!( - "cannot send action when server is unreachable or disabled" - )); - } + let server = + resource::get::(&deployment.config.server_id).await?; Ok((deployment, server)) } @@ -88,6 +92,12 @@ impl Resolve for State { let periphery = periphery_client(&server)?; + periphery + .health_check() + .await + .context("Failed server health check, stopping run.")?; + + // This block resolves the attached Build to an actual versioned image let (version, registry_token, aws_ecr) = match &deployment .config .image @@ -181,12 +191,42 @@ impl Resolve for State { } }; + // interpolate variables / secrets, returning the sanitizing replacers to send to + // periphery so it may sanitize the final command for safe logging (avoids exposing secret values) let secret_replacers = if !deployment.config.skip_secret_interp { + let vars_and_secrets = get_variables_and_secrets().await?; + + let mut global_replacers = HashSet::new(); + let mut secret_replacers = HashSet::new(); + interpolate_variables_secrets_into_environment( + &vars_and_secrets, &mut deployment.config.environment, + &mut global_replacers, + &mut secret_replacers, + )?; + + interpolate_variables_secrets_into_extra_args( + &vars_and_secrets, + &mut deployment.config.extra_args, + &mut global_replacers, + &mut secret_replacers, + )?; + + interpolate_variables_secrets_into_container_command( + &vars_and_secrets, + &mut deployment.config.command, + &mut global_replacers, + &mut secret_replacers, + )?; + + add_interp_update_log( &mut update, - ) - .await? + &global_replacers, + &secret_replacers, + ); + + secret_replacers } else { Default::default() }; @@ -225,11 +265,11 @@ impl Resolve for State { } } -impl Resolve for State { - #[instrument(name = "StartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] +impl Resolve for State { + #[instrument(name = "StartDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] async fn resolve( &self, - StartContainer { deployment }: StartContainer, + StartDeployment { deployment }: StartDeployment, (user, mut update): (User, Update), ) -> anyhow::Result { let (deployment, server) = @@ -253,7 +293,7 @@ impl Resolve for State { let log = match periphery .request(api::container::StartContainer { - name: deployment.name.clone(), + name: deployment.name, }) .await { @@ -273,11 +313,11 @@ impl Resolve for State { } } -impl Resolve for State { - #[instrument(name = "RestartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] +impl Resolve for State { + #[instrument(name = "RestartDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] async fn resolve( &self, - RestartContainer { deployment }: RestartContainer, + RestartDeployment { deployment }: RestartDeployment, (user, mut update): (User, Update), ) -> anyhow::Result { let (deployment, server) = @@ -301,7 +341,7 @@ impl Resolve for State { let log = match periphery .request(api::container::RestartContainer { - name: deployment.name.clone(), + name: deployment.name, }) .await { @@ -323,11 +363,11 @@ impl Resolve for State { } } -impl Resolve for State { - #[instrument(name = "PauseContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] +impl Resolve for State { + #[instrument(name = "PauseDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] async fn resolve( &self, - PauseContainer { deployment }: PauseContainer, + 
PauseDeployment { deployment }: PauseDeployment, (user, mut update): (User, Update), ) -> anyhow::Result { let (deployment, server) = @@ -351,7 +391,7 @@ impl Resolve for State { let log = match periphery .request(api::container::PauseContainer { - name: deployment.name.clone(), + name: deployment.name, }) .await { @@ -371,11 +411,11 @@ impl Resolve for State { } } -impl Resolve for State { - #[instrument(name = "UnpauseContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] +impl Resolve for State { + #[instrument(name = "UnpauseDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] async fn resolve( &self, - UnpauseContainer { deployment }: UnpauseContainer, + UnpauseDeployment { deployment }: UnpauseDeployment, (user, mut update): (User, Update), ) -> anyhow::Result { let (deployment, server) = @@ -399,7 +439,7 @@ impl Resolve for State { let log = match periphery .request(api::container::UnpauseContainer { - name: deployment.name.clone(), + name: deployment.name, }) .await { @@ -421,15 +461,15 @@ impl Resolve for State { } } -impl Resolve for State { - #[instrument(name = "StopContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] +impl Resolve for State { + #[instrument(name = "StopDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] async fn resolve( &self, - StopContainer { + StopDeployment { deployment, signal, time, - }: StopContainer, + }: StopDeployment, (user, mut update): (User, Update), ) -> anyhow::Result { let (deployment, server) = @@ -453,7 +493,7 @@ impl Resolve for State { let log = match periphery .request(api::container::StopContainer { - name: deployment.name.clone(), + name: deployment.name, signal: signal .unwrap_or(deployment.config.termination_signal) .into(), @@ -479,15 +519,15 @@ impl Resolve for State { } } -impl Resolve for State { - #[instrument(name = "RemoveContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] +impl Resolve for State { + #[instrument(name = "DestroyDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] async fn resolve( &self, - RemoveContainer { + DestroyDeployment { deployment, signal, time, - }: RemoveContainer, + }: DestroyDeployment, (user, mut update): (User, Update), ) -> anyhow::Result { let (deployment, server) = @@ -502,7 +542,7 @@ impl Resolve for State { // Will check to ensure deployment not already busy before updating, and return Err if so. // The returned guard will set the action state back to default when dropped. let _action_guard = - action_state.update(|state| state.removing = true)?; + action_state.update(|state| state.destroying = true)?; // Send update after setting action state, this way frontend gets correct state. 
update_update(update.clone()).await?; @@ -511,7 +551,7 @@ impl Resolve for State { let log = match periphery .request(api::container::RemoveContainer { - name: deployment.name.clone(), + name: deployment.name, signal: signal .unwrap_or(deployment.config.termination_signal) .into(), diff --git a/bin/core/src/api/execute/mod.rs b/bin/core/src/api/execute/mod.rs index 8401536fc..4123887a8 100644 --- a/bin/core/src/api/execute/mod.rs +++ b/bin/core/src/api/execute/mod.rs @@ -3,7 +3,7 @@ use std::time::Instant; use anyhow::{anyhow, Context}; use axum::{middleware, routing::post, Extension, Router}; use formatting::format_serror; -use monitor_client::{ +use komodo_client::{ api::execute::*, entities::{ update::{Log, Update}, @@ -39,19 +39,34 @@ mod sync; #[serde(tag = "type", content = "params")] pub enum ExecuteRequest { // ==== SERVER ==== - StopAllContainers(StopAllContainers), - PruneContainers(PruneContainers), - PruneImages(PruneImages), - PruneNetworks(PruneNetworks), - - // ==== DEPLOYMENT ==== - Deploy(Deploy), StartContainer(StartContainer), RestartContainer(RestartContainer), PauseContainer(PauseContainer), UnpauseContainer(UnpauseContainer), StopContainer(StopContainer), - RemoveContainer(RemoveContainer), + DestroyContainer(DestroyContainer), + StartAllContainers(StartAllContainers), + RestartAllContainers(RestartAllContainers), + PauseAllContainers(PauseAllContainers), + UnpauseAllContainers(UnpauseAllContainers), + StopAllContainers(StopAllContainers), + PruneContainers(PruneContainers), + DeleteNetwork(DeleteNetwork), + PruneNetworks(PruneNetworks), + DeleteImage(DeleteImage), + PruneImages(PruneImages), + DeleteVolume(DeleteVolume), + PruneVolumes(PruneVolumes), + PruneSystem(PruneSystem), + + // ==== DEPLOYMENT ==== + Deploy(Deploy), + StartDeployment(StartDeployment), + RestartDeployment(RestartDeployment), + PauseDeployment(PauseDeployment), + UnpauseDeployment(UnpauseDeployment), + StopDeployment(StopDeployment), + DestroyDeployment(DestroyDeployment), // ==== STACK ==== DeployStack(DeployStack), @@ -144,10 +159,7 @@ async fn task( user: User, update: Update, ) -> anyhow::Result { - info!( - "/execute request {req_id} | user: {} ({})", - user.username, user.id - ); + info!("/execute request {req_id} | user: {}", user.username); let timer = Instant::now(); let res = State diff --git a/bin/core/src/api/execute/procedure.rs b/bin/core/src/api/execute/procedure.rs index 975e078a7..075924c2e 100644 --- a/bin/core/src/api/execute/procedure.rs +++ b/bin/core/src/api/execute/procedure.rs @@ -1,7 +1,7 @@ use std::pin::Pin; use formatting::{bold, colored, format_serror, muted, Color}; -use monitor_client::{ +use komodo_client::{ api::execute::RunProcedure, entities::{ permission::PermissionLevel, procedure::Procedure, diff --git a/bin/core/src/api/execute/repo.rs b/bin/core/src/api/execute/repo.rs index 22e719bbe..5b974c7ea 100644 --- a/bin/core/src/api/execute/repo.rs +++ b/bin/core/src/api/execute/repo.rs @@ -1,16 +1,16 @@ -use std::{future::IntoFuture, time::Duration}; +use std::{collections::HashSet, future::IntoFuture, time::Duration}; use anyhow::{anyhow, Context}; use formatting::format_serror; -use monitor_client::{ +use komodo_client::{ api::execute::*, entities::{ - alert::{Alert, AlertData}, + alert::{Alert, AlertData, SeverityLevel}, builder::{Builder, BuilderConfig}, - monitor_timestamp, optional_string, + komodo_timestamp, optional_string, permission::PermissionLevel, repo::Repo, - server::{stats::SeverityLevel, Server}, + server::Server, update::{Log, Update}, 
user::User, }, @@ -22,7 +22,7 @@ use mungos::{ options::FindOneOptions, }, }; -use periphery_client::api::{self, git::RepoActionResponseV1_13}; +use periphery_client::api; use resolver_api::Resolve; use tokio_util::sync::CancellationToken; @@ -31,7 +31,14 @@ use crate::{ alert::send_alerts, builder::{cleanup_builder_instance, get_builder_periphery}, channel::repo_cancel_channel, - git_token, periphery_client, + git_token, + interpolate::{ + add_interp_update_log, + interpolate_variables_secrets_into_environment, + interpolate_variables_secrets_into_system_command, + }, + periphery_client, + query::get_variables_and_secrets, update::update_update, }, resource::{self, refresh_repo_state_cache}, @@ -84,6 +91,11 @@ impl Resolve for State { let periphery = periphery_client(&server)?; + // interpolate variables / secrets, returning the sanitizing replacers to send to + // periphery so it may sanitize the final command for safe logging (avoids exposing secret values) + let secret_replacers = + interpolate(&mut repo, &mut update).await?; + let logs = match periphery .request(api::git::CloneRepo { args: (&repo).into(), @@ -91,13 +103,11 @@ impl Resolve for State { environment: repo.config.environment, env_file_path: repo.config.env_file_path, skip_secret_interp: repo.config.skip_secret_interp, + replacers: secret_replacers.into_iter().collect(), }) .await { - Ok(res) => { - let res: RepoActionResponseV1_13 = res.into(); - res.logs - } + Ok(res) => res.logs, Err(e) => { vec![Log::error( "clone repo", @@ -124,7 +134,7 @@ impl Resolve for State { PullRepo { repo }: PullRepo, (user, mut update): (User, Update), ) -> anyhow::Result { - let repo = resource::get_check_permissions::( + let mut repo = resource::get_check_permissions::( &repo, &user, PermissionLevel::Execute, @@ -151,6 +161,11 @@ impl Resolve for State { let periphery = periphery_client(&server)?; + // interpolate variables / secrets, returning the sanitizing replacers to send to + // periphery so it may sanitize the final command for safe logging (avoids exposing secret values) + let secret_replacers = + interpolate(&mut repo, &mut update).await?; + let logs = match periphery .request(api::git::PullRepo { name: repo.name.clone(), @@ -160,11 +175,11 @@ impl Resolve for State { environment: repo.config.environment, env_file_path: repo.config.env_file_path, skip_secret_interp: repo.config.skip_secret_interp, + replacers: secret_replacers.into_iter().collect(), }) .await { Ok(res) => { - let res: RepoActionResponseV1_13 = res.into(); update.commit_hash = res.commit_hash.unwrap_or_default(); res.logs } @@ -217,7 +232,7 @@ async fn update_last_pulled_time(repo_name: &str) { .repos .update_one( doc! { "name": repo_name }, - doc! { "$set": { "info.last_pulled_at": monitor_timestamp() } }, + doc! { "$set": { "info.last_pulled_at": komodo_timestamp() } }, ) .await; if let Err(e) = res { @@ -337,14 +352,20 @@ impl Resolve for State { // CLONE REPO + // interpolate variables / secrets, returning the sanitizing replacers to send to + // periphery so it may sanitize the final command for safe logging (avoids exposing secret values) + let secret_replacers = + interpolate(&mut repo, &mut update).await?; + let res = tokio::select! 
{ res = periphery .request(api::git::CloneRepo { args: (&repo).into(), git_token, - environment: Default::default(), - env_file_path: Default::default(), - skip_secret_interp: Default::default(), + environment: repo.config.environment, + env_file_path: repo.config.env_file_path, + skip_secret_interp: repo.config.skip_secret_interp, + replacers: secret_replacers.into_iter().collect() }) => res, _ = cancel.cancelled() => { debug!("build cancelled during clone, cleaning up builder"); @@ -359,7 +380,6 @@ impl Resolve for State { let commit_message = match res { Ok(res) => { debug!("finished repo clone"); - let res: RepoActionResponseV1_13 = res.into(); update.logs.extend(res.logs); update.commit_hash = res.commit_hash.unwrap_or_default(); res.commit_message.unwrap_or_default() @@ -383,7 +403,7 @@ impl Resolve for State { .update_one( doc! { "name": &repo.name }, doc! { "$set": { - "info.last_built_at": monitor_timestamp(), + "info.last_built_at": komodo_timestamp(), "info.built_hash": &update.commit_hash, "info.built_message": commit_message }}, @@ -421,8 +441,8 @@ impl Resolve for State { let alert = Alert { id: Default::default(), target, - ts: monitor_timestamp(), - resolved_ts: Some(monitor_timestamp()), + ts: komodo_timestamp(), + resolved_ts: Some(komodo_timestamp()), resolved: true, level: SeverityLevel::Warning, data: AlertData::RepoBuildFailed { @@ -468,8 +488,8 @@ async fn handle_builder_early_return( let alert = Alert { id: Default::default(), target, - ts: monitor_timestamp(), - resolved_ts: Some(monitor_timestamp()), + ts: komodo_timestamp(), + resolved_ts: Some(komodo_timestamp()), resolved: true, level: SeverityLevel::Warning, data: AlertData::RepoBuildFailed { @@ -589,3 +609,46 @@ impl Resolve for State { Ok(update) } } + +async fn interpolate( + repo: &mut Repo, + update: &mut Update, +) -> anyhow::Result> { + if !repo.config.skip_secret_interp { + let vars_and_secrets = get_variables_and_secrets().await?; + + let mut global_replacers = HashSet::new(); + let mut secret_replacers = HashSet::new(); + + interpolate_variables_secrets_into_environment( + &vars_and_secrets, + &mut repo.config.environment, + &mut global_replacers, + &mut secret_replacers, + )?; + + interpolate_variables_secrets_into_system_command( + &vars_and_secrets, + &mut repo.config.on_clone, + &mut global_replacers, + &mut secret_replacers, + )?; + + interpolate_variables_secrets_into_system_command( + &vars_and_secrets, + &mut repo.config.on_pull, + &mut global_replacers, + &mut secret_replacers, + )?; + + add_interp_update_log( + update, + &global_replacers, + &secret_replacers, + ); + + Ok(secret_replacers) + } else { + Ok(Default::default()) + } +} diff --git a/bin/core/src/api/execute/server.rs b/bin/core/src/api/execute/server.rs index da383ccc3..3820cb82e 100644 --- a/bin/core/src/api/execute/server.rs +++ b/bin/core/src/api/execute/server.rs @@ -1,12 +1,12 @@ -use anyhow::{anyhow, Context}; +use anyhow::Context; use formatting::format_serror; -use monitor_client::{ +use komodo_client::{ api::execute::*, entities::{ - all_logs_success, monitor_timestamp, + all_logs_success, permission::PermissionLevel, - server::{Server, ServerState}, - update::{Log, Update, UpdateStatus}, + server::Server, + update::{Log, Update}, user::User, }, }; @@ -14,14 +14,540 @@ use periphery_client::api; use resolver_api::Resolve; use crate::{ - helpers::{ - periphery_client, query::get_server_with_status, - update::update_update, - }, + helpers::{periphery_client, update::update_update}, + monitor::update_cache_for_server, 
resource, state::{action_states, State}, }; +impl Resolve for State { + #[instrument(name = "StartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + StartContainer { server, container }: StartContainer, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + // get the action state for the server (or insert default). + let action_state = action_states() + .server + .get_or_insert_default(&server.id) + .await; + + // Will check to ensure server not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. + let _action_guard = action_state + .update(|state| state.starting_containers = true)?; + + // Send update after setting action state, this way frontend gets correct state. + update_update(update.clone()).await?; + + let periphery = periphery_client(&server)?; + + let log = match periphery + .request(api::container::StartContainer { name: container }) + .await + { + Ok(log) => log, + Err(e) => Log::error( + "start container", + format_serror(&e.context("failed to start container").into()), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; + + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "RestartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + RestartContainer { server, container }: RestartContainer, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + // get the action state for the server (or insert default). + let action_state = action_states() + .server + .get_or_insert_default(&server.id) + .await; + + // Will check to ensure server not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. + let _action_guard = action_state + .update(|state| state.restarting_containers = true)?; + + // Send update after setting action state, this way frontend gets correct state. + update_update(update.clone()).await?; + + let periphery = periphery_client(&server)?; + + let log = match periphery + .request(api::container::RestartContainer { name: container }) + .await + { + Ok(log) => log, + Err(e) => Log::error( + "restart container", + format_serror( + &e.context("failed to restart container").into(), + ), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; + + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "PauseContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + PauseContainer { server, container }: PauseContainer, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + // get the action state for the server (or insert default). + let action_state = action_states() + .server + .get_or_insert_default(&server.id) + .await; + + // Will check to ensure server not already busy before updating, and return Err if so.
+ // The returned guard will set the action state back to default when dropped. + let _action_guard = + action_state.update(|state| state.pausing_containers = true)?; + + // Send update after setting action state, this way frontend gets correct state. + update_update(update.clone()).await?; + + let periphery = periphery_client(&server)?; + + let log = match periphery + .request(api::container::PauseContainer { name: container }) + .await + { + Ok(log) => log, + Err(e) => Log::error( + "pause container", + format_serror(&e.context("failed to pause container").into()), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; + + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "UnpauseContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + UnpauseContainer { server, container }: UnpauseContainer, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + // get the action state for the server (or insert default). + let action_state = action_states() + .server + .get_or_insert_default(&server.id) + .await; + + // Will check to ensure server not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. + let _action_guard = action_state + .update(|state| state.unpausing_containers = true)?; + + // Send update after setting action state, this way frontend gets correct state. + update_update(update.clone()).await?; + + let periphery = periphery_client(&server)?; + + let log = match periphery + .request(api::container::UnpauseContainer { name: container }) + .await + { + Ok(log) => log, + Err(e) => Log::error( + "unpause container", + format_serror( + &e.context("failed to unpause container").into(), + ), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; + + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "StopContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + StopContainer { + server, + container, + signal, + time, + }: StopContainer, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + // get the action state for the server (or insert default). + let action_state = action_states() + .server + .get_or_insert_default(&server.id) + .await; + + // Will check to ensure server not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. + let _action_guard = action_state + .update(|state| state.stopping_containers = true)?; + + // Send update after setting action state, this way frontend gets correct state. 
+ update_update(update.clone()).await?; + + let periphery = periphery_client(&server)?; + + let log = match periphery + .request(api::container::StopContainer { + name: container, + signal, + time, + }) + .await + { + Ok(log) => log, + Err(e) => Log::error( + "stop container", + format_serror(&e.context("failed to stop container").into()), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; + + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "DestroyContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + DestroyContainer { + server, + container, + signal, + time, + }: DestroyContainer, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + // get the action state for the server (or insert default). + let action_state = action_states() + .server + .get_or_insert_default(&server.id) + .await; + + // Will check to ensure server not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. + let _action_guard = + action_state.update(|state| state.pruning_containers = true)?; + + // Send update after setting action state, this way frontend gets correct state. + update_update(update.clone()).await?; + + let periphery = periphery_client(&server)?; + + let log = match periphery + .request(api::container::RemoveContainer { + name: container, + signal, + time, + }) + .await + { + Ok(log) => log, + Err(e) => Log::error( + "destroy container", + format_serror(&e.context("failed to destroy container").into()), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; + + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "StartAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + StartAllContainers { server }: StartAllContainers, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + // get the action state for the server (or insert default). + let action_state = action_states() + .server + .get_or_insert_default(&server.id) + .await; + + // Will check to ensure server not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. + let _action_guard = action_state + .update(|state| state.starting_containers = true)?; + + update_update(update.clone()).await?; + + let logs = periphery_client(&server)?
+ .request(api::container::StartAllContainers {}) + .await + .context("failed to start all containers on host")?; + + update.logs.extend(logs); + + if all_logs_success(&update.logs) { + update.push_simple_log( + "start all containers", + String::from("All containers have been started on the host."), + ); + } + + update_cache_for_server(&server).await; + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "RestartAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + RestartAllContainers { server }: RestartAllContainers, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + // get the action state for the server (or insert default). + let action_state = action_states() + .server + .get_or_insert_default(&server.id) + .await; + + // Will check to ensure server not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. + let _action_guard = action_state + .update(|state| state.restarting_containers = true)?; + + update_update(update.clone()).await?; + + let logs = periphery_client(&server)? + .request(api::container::RestartAllContainers {}) + .await + .context("failed to restart all containers on host")?; + + update.logs.extend(logs); + + if all_logs_success(&update.logs) { + update.push_simple_log( + "restart all containers", + String::from( + "All containers have been restarted on the host.", + ), + ); + } + + update_cache_for_server(&server).await; + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "PauseAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + PauseAllContainers { server }: PauseAllContainers, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + // get the action state for the server (or insert default). + let action_state = action_states() + .server + .get_or_insert_default(&server.id) + .await; + + // Will check to ensure server not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. + let _action_guard = + action_state.update(|state| state.pausing_containers = true)?; + + update_update(update.clone()).await?; + + let logs = periphery_client(&server)?
+ .request(api::container::PauseAllContainers {}) + .await + .context("failed to pause all containers on host")?; + + update.logs.extend(logs); + + if all_logs_success(&update.logs) { + update.push_simple_log( + "pause all containers", + String::from("All containers have been paused on the host."), + ); + } + + update_cache_for_server(&server).await; + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "UnpauseAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + UnpauseAllContainers { server }: UnpauseAllContainers, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + // get the action state for the server (or insert default). + let action_state = action_states() + .server + .get_or_insert_default(&server.id) + .await; + + // Will check to ensure server not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. + let _action_guard = action_state + .update(|state| state.unpausing_containers = true)?; + + update_update(update.clone()).await?; + + let logs = periphery_client(&server)? + .request(api::container::UnpauseAllContainers {}) + .await + .context("failed to unpause all containers on host")?; + + update.logs.extend(logs); + + if all_logs_success(&update.logs) { + update.push_simple_log( + "unpause all containers", + String::from( + "All containers have been unpaused on the host.", + ), + ); + } + + update_cache_for_server(&server).await; + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + impl Resolve for State { #[instrument(name = "StopAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] async fn resolve( @@ -29,12 +555,12 @@ impl Resolve for State { StopAllContainers { server }: StopAllContainers, (user, mut update): (User, Update), ) -> anyhow::Result { - let (server, status) = get_server_with_status(&server).await?; - if status != ServerState::Ok { - return Err(anyhow!( - "cannot send action when server is unreachable or disabled" - )); - } + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; // get the action state for the server (or insert default). let action_state = action_states() @@ -52,14 +578,18 @@ impl Resolve for State { let logs = periphery_client(&server)?
.request(api::container::StopAllContainers {}) .await - .context("failed to stop all container on host")?; + .context("failed to stop all containers on host")?; update.logs.extend(logs); if all_logs_success(&update.logs) { - update.push_simple_log("stop all containers", String::from("All containers have successfully been stopped on the host.")); + update.push_simple_log( + "stop all containers", + String::from("All containers have been stopped on the host."), + ); } + update_cache_for_server(&server).await; update.finalize(); update_update(update.clone()).await?; @@ -112,11 +642,55 @@ impl Resolve for State { ), }; - update.success = log.success; - update.status = UpdateStatus::Complete; - update.end_ts = Some(monitor_timestamp()); update.logs.push(log); + update_cache_for_server(&server).await; + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "DeleteNetwork", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + DeleteNetwork { server, name }: DeleteNetwork, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + update_update(update.clone()).await?; + + let periphery = periphery_client(&server)?; + + let log = match periphery + .request(api::network::DeleteNetwork { name: name.clone() }) + .await + .context(format!( + "failed to delete network {name} on server {}", + server.name + )) { + Ok(log) => log, + Err(e) => Log::error( + "delete network", + format_serror( + &e.context(format!("failed to delete network {name}")) + .into(), + ), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; + + update.finalize(); update_update(update.clone()).await?; Ok(update) @@ -166,11 +740,54 @@ impl Resolve for State { ), }; - update.success = log.success; - update.status = UpdateStatus::Complete; - update.end_ts = Some(monitor_timestamp()); update.logs.push(log); + update_cache_for_server(&server).await; + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "DeleteImage", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + DeleteImage { server, name }: DeleteImage, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + update_update(update.clone()).await?; + + let periphery = periphery_client(&server)?; + + let log = match periphery + .request(api::image::DeleteImage { name: name.clone() }) + .await + .context(format!( + "failed to delete image {name} on server {}", + server.name + )) { + Ok(log) => log, + Err(e) => Log::error( + "delete image", + format_serror( + &e.context(format!("failed to delete image {name}")).into(), + ), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; + + update.finalize(); update_update(update.clone()).await?; Ok(update) @@ -207,7 +824,7 @@ impl Resolve for State { let periphery = periphery_client(&server)?; let log = - match periphery.request(api::build::PruneImages {}).await { + match periphery.request(api::image::PruneImages {}).await { Ok(log) => log, Err(e) => Log::error( "prune images", @@ -219,9 +836,155 @@ impl Resolve for State { }; update.logs.push(log); + update_cache_for_server(&server).await; + + 
update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "DeleteVolume", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + DeleteVolume { server, name }: DeleteVolume, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + update_update(update.clone()).await?; + + let periphery = periphery_client(&server)?; + + let log = match periphery + .request(api::volume::DeleteVolume { name: name.clone() }) + .await + .context(format!( + "failed to delete volume {name} on server {}", + server.name + )) { + Ok(log) => log, + Err(e) => Log::error( + "delete volume", + format_serror( + &e.context(format!("failed to delete volume {name}")) + .into(), + ), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; + + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "PruneVolumes", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + PruneVolumes { server }: PruneVolumes, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + // get the action state for the server (or insert default). + let action_state = action_states() + .server + .get_or_insert_default(&server.id) + .await; + + // Will check to ensure server not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. + let _action_guard = + action_state.update(|state| state.pruning_volumes = true)?; + + update_update(update.clone()).await?; + + let periphery = periphery_client(&server)?; + + let log = + match periphery.request(api::volume::PruneVolumes {}).await { + Ok(log) => log, + Err(e) => Log::error( + "prune volumes", + format!( + "failed to prune volumes on server {} | {e:#?}", + server.name + ), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; + + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "PruneSystem", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + PruneSystem { server }: PruneSystem, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Execute, + ) + .await?; + + // get the action state for the server (or insert default). + let action_state = action_states() + .server + .get_or_insert_default(&server.id) + .await; + + // Will check to ensure server not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. 
+ let _action_guard = + action_state.update(|state| state.pruning_system = true)?; + + update_update(update.clone()).await?; + + let periphery = periphery_client(&server)?; + + let log = match periphery.request(api::PruneSystem {}).await { + Ok(log) => log, + Err(e) => Log::error( + "prune system", + format!( + "failed to run docker system prune on server {} | {e:#?}", + server.name + ), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; update.finalize(); - update_update(update.clone()).await?; Ok(update) diff --git a/bin/core/src/api/execute/server_template.rs b/bin/core/src/api/execute/server_template.rs index a25aa5e96..8d8e1fa59 100644 --- a/bin/core/src/api/execute/server_template.rs +++ b/bin/core/src/api/execute/server_template.rs @@ -1,6 +1,6 @@ use anyhow::{anyhow, Context}; use formatting::format_serror; -use monitor_client::{ +use komodo_client::{ api::{execute::LaunchServer, write::CreateServer}, entities::{ permission::PermissionLevel, diff --git a/bin/core/src/api/execute/stack.rs b/bin/core/src/api/execute/stack.rs index 238a5b60f..5012feb19 100644 --- a/bin/core/src/api/execute/stack.rs +++ b/bin/core/src/api/execute/stack.rs @@ -1,6 +1,8 @@ +use std::collections::HashSet; + use anyhow::Context; use formatting::format_serror; -use monitor_client::{ +use komodo_client::{ api::execute::*, entities::{ permission::PermissionLevel, stack::StackInfo, update::Update, @@ -13,7 +15,13 @@ use resolver_api::Resolve; use crate::{ helpers::{ - interpolate_variables_secrets_into_environment, periphery_client, + interpolate::{ + add_interp_update_log, + interpolate_variables_secrets_into_environment, + interpolate_variables_secrets_into_extra_args, + }, + periphery_client, + query::get_variables_and_secrets, stack::{ execute::execute_compose, get_stack_and_server, services::extract_services_into_res, @@ -65,13 +73,38 @@ impl Resolve for State { || format!("Failed to get registry token in call to db. Stopping run.
| {} | {}", stack.config.registry_provider, stack.config.registry_account), )?; - if !stack.config.skip_secret_interp { + // interpolate variables / secrets, returning the sanitizing replacers to send to + // periphery so it may sanitize the final command for safe logging (avoids exposing secret values) + let secret_replacers = if !stack.config.skip_secret_interp { + let vars_and_secrets = get_variables_and_secrets().await?; + + let mut global_replacers = HashSet::new(); + let mut secret_replacers = HashSet::new(); + interpolate_variables_secrets_into_environment( + &vars_and_secrets, &mut stack.config.environment, + &mut global_replacers, + &mut secret_replacers, + )?; + + interpolate_variables_secrets_into_extra_args( + &vars_and_secrets, + &mut stack.config.extra_args, + &mut global_replacers, + &mut secret_replacers, + )?; + + add_interp_update_log( &mut update, - ) - .await?; - } + &global_replacers, + &secret_replacers, + ); + + secret_replacers + } else { + Default::default() + }; let ComposeUpResponse { logs, @@ -87,6 +120,7 @@ impl Resolve for State { service: None, git_token, registry_token, + replacers: secret_replacers.into_iter().collect(), }) .await?; diff --git a/bin/core/src/api/execute/sync.rs b/bin/core/src/api/execute/sync.rs index 22c0398f3..f7a118584 100644 --- a/bin/core/src/api/execute/sync.rs +++ b/bin/core/src/api/execute/sync.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use anyhow::{anyhow, Context}; use formatting::{colored, format_serror, Color}; use mongo_indexed::doc; -use monitor_client::{ +use komodo_client::{ api::{execute::RunSync, write::RefreshResourceSyncPending}, entities::{ self, @@ -11,7 +11,7 @@ use monitor_client::{ build::Build, builder::Builder, deployment::Deployment, - monitor_timestamp, + komodo_timestamp, permission::PermissionLevel, procedure::Procedure, repo::Repo, @@ -387,7 +387,7 @@ impl Resolve for State { &sync.id, doc! 
{ "$set": { - "info.last_sync_ts": monitor_timestamp(), + "info.last_sync_ts": komodo_timestamp(), "info.last_sync_hash": hash, "info.last_sync_message": message, } diff --git a/bin/core/src/api/read/alert.rs b/bin/core/src/api/read/alert.rs index 28f2cbdab..67bec5e66 100644 --- a/bin/core/src/api/read/alert.rs +++ b/bin/core/src/api/read/alert.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use monitor_client::{ +use komodo_client::{ api::read::{ GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse, }, diff --git a/bin/core/src/api/read/alerter.rs b/bin/core/src/api/read/alerter.rs index 9a825ac8e..c631df557 100644 --- a/bin/core/src/api/read/alerter.rs +++ b/bin/core/src/api/read/alerter.rs @@ -1,6 +1,6 @@ use anyhow::Context; use mongo_indexed::Document; -use monitor_client::{ +use komodo_client::{ api::read::*, entities::{ alerter::{Alerter, AlerterListItem}, diff --git a/bin/core/src/api/read/build.rs b/bin/core/src/api/read/build.rs index 9122e115b..fc20c1242 100644 --- a/bin/core/src/api/read/build.rs +++ b/bin/core/src/api/read/build.rs @@ -3,7 +3,7 @@ use std::collections::{HashMap, HashSet}; use anyhow::Context; use async_timing_util::unix_timestamp_ms; use futures::TryStreamExt; -use monitor_client::{ +use komodo_client::{ api::read::*, entities::{ build::{Build, BuildActionState, BuildListItem, BuildState}, diff --git a/bin/core/src/api/read/builder.rs b/bin/core/src/api/read/builder.rs index 4dad0db6f..02d8d2e1f 100644 --- a/bin/core/src/api/read/builder.rs +++ b/bin/core/src/api/read/builder.rs @@ -1,6 +1,6 @@ use anyhow::Context; use mongo_indexed::Document; -use monitor_client::{ +use komodo_client::{ api::read::*, entities::{ builder::{Builder, BuilderListItem}, diff --git a/bin/core/src/api/read/deployment.rs b/bin/core/src/api/read/deployment.rs index 3df26278e..b8cc703b0 100644 --- a/bin/core/src/api/read/deployment.rs +++ b/bin/core/src/api/read/deployment.rs @@ -1,13 +1,14 @@ use std::{cmp, collections::HashSet}; use anyhow::{anyhow, Context}; -use monitor_client::{ +use komodo_client::{ api::read::*, entities::{ deployment::{ Deployment, DeploymentActionState, DeploymentConfig, - DeploymentListItem, DeploymentState, DockerContainerStats, + DeploymentListItem, DeploymentState, }, + docker::container::ContainerStats, permission::PermissionLevel, server::Server, update::Log, @@ -84,10 +85,10 @@ impl Resolve for State { const MAX_LOG_LENGTH: u64 = 5000; -impl Resolve for State { +impl Resolve for State { async fn resolve( &self, - GetLog { deployment, tail }: GetLog, + GetDeploymentLog { deployment, tail }: GetDeploymentLog, user: User, ) -> anyhow::Result { let Deployment { @@ -114,15 +115,15 @@ impl Resolve for State { } } -impl Resolve for State { +impl Resolve for State { async fn resolve( &self, - SearchLog { + SearchDeploymentLog { deployment, terms, combinator, invert, - }: SearchLog, + }: SearchDeploymentLog, user: User, ) -> anyhow::Result { let Deployment { @@ -156,7 +157,7 @@ impl Resolve for State { &self, GetDeploymentStats { deployment }: GetDeploymentStats, user: User, - ) -> anyhow::Result { + ) -> anyhow::Result { let Deployment { name, config: DeploymentConfig { server_id, .. 
}, diff --git a/bin/core/src/api/read/mod.rs b/bin/core/src/api/read/mod.rs index 9c40dad80..27d95c9f4 100644 --- a/bin/core/src/api/read/mod.rs +++ b/bin/core/src/api/read/mod.rs @@ -3,7 +3,7 @@ use std::{collections::HashSet, sync::OnceLock, time::Instant}; use anyhow::{anyhow, Context}; use axum::{middleware, routing::post, Extension, Router}; use axum_extra::{headers::ContentType, TypedHeader}; -use monitor_client::{ +use komodo_client::{ api::read::*, entities::{ build::Build, @@ -12,8 +12,8 @@ use monitor_client::{ repo::Repo, server::Server, sync::ResourceSync, - update::ResourceTarget, user::User, + ResourceTarget, }, }; use resolver_api::{ @@ -105,6 +105,14 @@ enum ReadRequest { GetHistoricalServerStats(GetHistoricalServerStats), ListServers(ListServers), ListFullServers(ListFullServers), + InspectDockerContainer(InspectDockerContainer), + GetResourceMatchingContainer(GetResourceMatchingContainer), + GetContainerLog(GetContainerLog), + SearchContainerLog(SearchContainerLog), + InspectDockerNetwork(InspectDockerNetwork), + InspectDockerImage(InspectDockerImage), + ListDockerImageHistory(ListDockerImageHistory), + InspectDockerVolume(InspectDockerVolume), #[to_string_resolver] ListDockerContainers(ListDockerContainers), #[to_string_resolver] @@ -112,6 +120,8 @@ enum ReadRequest { #[to_string_resolver] ListDockerImages(ListDockerImages), #[to_string_resolver] + ListDockerVolumes(ListDockerVolumes), + #[to_string_resolver] ListComposeProjects(ListComposeProjects), // ==== DEPLOYMENT ==== @@ -120,8 +130,8 @@ enum ReadRequest { GetDeploymentContainer(GetDeploymentContainer), GetDeploymentActionState(GetDeploymentActionState), GetDeploymentStats(GetDeploymentStats), - GetLog(GetLog), - SearchLog(SearchLog), + GetDeploymentLog(GetDeploymentLog), + SearchDeploymentLog(SearchDeploymentLog), ListDeployments(ListDeployments), ListFullDeployments(ListFullDeployments), ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs), diff --git a/bin/core/src/api/read/permission.rs b/bin/core/src/api/read/permission.rs index 0369c2e98..84c9d88c7 100644 --- a/bin/core/src/api/read/permission.rs +++ b/bin/core/src/api/read/permission.rs @@ -1,5 +1,5 @@ use anyhow::{anyhow, Context}; -use monitor_client::{ +use komodo_client::{ api::read::{ GetPermissionLevel, GetPermissionLevelResponse, ListPermissions, ListPermissionsResponse, ListUserTargetPermissions, diff --git a/bin/core/src/api/read/procedure.rs b/bin/core/src/api/read/procedure.rs index 98a9ad164..a565e4242 100644 --- a/bin/core/src/api/read/procedure.rs +++ b/bin/core/src/api/read/procedure.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use monitor_client::{ +use komodo_client::{ api::read::*, entities::{ permission::PermissionLevel, diff --git a/bin/core/src/api/read/provider.rs b/bin/core/src/api/read/provider.rs index 5fcfc2e99..e808d9627 100644 --- a/bin/core/src/api/read/provider.rs +++ b/bin/core/src/api/read/provider.rs @@ -1,6 +1,6 @@ use anyhow::{anyhow, Context}; use mongo_indexed::{doc, Document}; -use monitor_client::{ +use komodo_client::{ api::read::{ GetDockerRegistryAccount, GetDockerRegistryAccountResponse, GetGitProviderAccount, GetGitProviderAccountResponse, diff --git a/bin/core/src/api/read/repo.rs b/bin/core/src/api/read/repo.rs index ff1347416..76b284a4b 100644 --- a/bin/core/src/api/read/repo.rs +++ b/bin/core/src/api/read/repo.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use monitor_client::{ +use komodo_client::{ api::read::*, entities::{ config::core::CoreConfig, diff --git a/bin/core/src/api/read/search.rs 
b/bin/core/src/api/read/search.rs index a6cb0af68..3568ad8ab 100644 --- a/bin/core/src/api/read/search.rs +++ b/bin/core/src/api/read/search.rs @@ -1,9 +1,8 @@ -use monitor_client::{ +use komodo_client::{ api::read::{FindResources, FindResourcesResponse}, entities::{ build::Build, deployment::Deployment, procedure::Procedure, - repo::Repo, server::Server, update::ResourceTargetVariant, - user::User, + repo::Repo, server::Server, user::User, ResourceTargetVariant, }, }; use resolver_api::Resolve; diff --git a/bin/core/src/api/read/server.rs b/bin/core/src/api/read/server.rs index 424aa319e..f8bb61497 100644 --- a/bin/core/src/api/read/server.rs +++ b/bin/core/src/api/read/server.rs @@ -1,4 +1,5 @@ use std::{ + cmp, collections::HashMap, sync::{Arc, OnceLock}, }; @@ -7,26 +8,42 @@ use anyhow::{anyhow, Context}; use async_timing_util::{ get_timelength_in_ms, unix_timestamp_ms, FIFTEEN_SECONDS_MS, }; -use monitor_client::{ +use komodo_client::{ api::read::*, entities::{ + deployment::Deployment, + docker::{ + container::Container, + image::{Image, ImageHistoryResponseItem}, + network::Network, + volume::Volume, + }, permission::PermissionLevel, server::{ Server, ServerActionState, ServerListItem, ServerState, }, + stack::{Stack, StackServiceNames}, + update::Log, user::User, + ResourceTarget, }, }; use mungos::{ find::find_collect, mongodb::{bson::doc, options::FindOptions}, }; -use periphery_client::api as periphery; +use periphery_client::api::{ + self as periphery, + container::InspectContainer, + image::{ImageHistory, InspectImage}, + network::InspectNetwork, + volume::InspectVolume, +}; use resolver_api::{Resolve, ResolveToString}; use tokio::sync::Mutex; use crate::{ - helpers::periphery_client, + helpers::{periphery_client, stack::compose_container_match_regex}, resource, state::{action_states, db_client, server_status_cache, State}, }; @@ -326,10 +343,10 @@ impl Resolve for State { } } -impl ResolveToString for State { +impl ResolveToString for State { async fn resolve_to_string( &self, - ListDockerImages { server }: ListDockerImages, + ListDockerContainers { server }: ListDockerContainers, user: User, ) -> anyhow::Result { let server = resource::get_check_permissions::( @@ -341,8 +358,8 @@ impl ResolveToString for State { let cache = server_status_cache() .get_or_insert_default(&server.id) .await; - if let Some(images) = &cache.images { - serde_json::to_string(images) + if let Some(containers) = &cache.containers { + serde_json::to_string(containers) .context("failed to serialize response") } else { Ok(String::from("[]")) @@ -350,6 +367,152 @@ impl ResolveToString for State { } } +impl Resolve for State { + async fn resolve( + &self, + InspectDockerContainer { server, container }: InspectDockerContainer, + user: User, + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Read, + ) + .await?; + let cache = server_status_cache() + .get_or_insert_default(&server.id) + .await; + if cache.state != ServerState::Ok { + return Err(anyhow!( + "Cannot inspect container: server is {:?}", + cache.state + )); + } + periphery_client(&server)? 
+ .request(InspectContainer { name: container }) + .await + } +} + +const MAX_LOG_LENGTH: u64 = 5000; + +impl Resolve for State { + async fn resolve( + &self, + GetContainerLog { + server, + container, + tail, + }: GetContainerLog, + user: User, + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Read, + ) + .await?; + periphery_client(&server)? + .request(periphery::container::GetContainerLog { + name: container, + tail: cmp::min(tail, MAX_LOG_LENGTH), + }) + .await + .context("failed at call to periphery") + } +} + +impl Resolve for State { + async fn resolve( + &self, + SearchContainerLog { + server, + container, + terms, + combinator, + invert, + }: SearchContainerLog, + user: User, + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Read, + ) + .await?; + periphery_client(&server)? + .request(periphery::container::GetContainerLogSearch { + name: container, + terms, + combinator, + invert, + }) + .await + .context("failed at call to periphery") + } +} + +impl Resolve for State { + async fn resolve( + &self, + GetResourceMatchingContainer { server, container }: GetResourceMatchingContainer, + user: User, + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Read, + ) + .await?; + // first check deployments + if let Ok(deployment) = + resource::get::(&container).await + { + return Ok(GetResourceMatchingContainerResponse { + resource: ResourceTarget::Deployment(deployment.id).into(), + }); + } + + // then check stacks + let stacks = + resource::list_full_for_user_using_document::( + doc! { "config.server_id": &server.id }, + &user, + ) + .await?; + + // check matching stack + for stack in stacks { + for StackServiceNames { + service_name, + container_name, + } in stack + .info + .deployed_services + .unwrap_or(stack.info.latest_services) + { + let is_match = match compose_container_match_regex(&container_name) + .with_context(|| format!("failed to construct container name matching regex for service {service_name}")) + { + Ok(regex) => regex, + Err(e) => { + warn!("{e:#}"); + continue; + } + }.is_match(&container); + + if is_match { + return Ok(GetResourceMatchingContainerResponse { + resource: ResourceTarget::Stack(stack.id).into(), + }); + } + } + } + + Ok(GetResourceMatchingContainerResponse { resource: None }) + } +} + impl ResolveToString for State { async fn resolve_to_string( &self, @@ -374,10 +537,37 @@ impl ResolveToString for State { } } -impl ResolveToString for State { +impl Resolve for State { + async fn resolve( + &self, + InspectDockerNetwork { server, network }: InspectDockerNetwork, + user: User, + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Read, + ) + .await?; + let cache = server_status_cache() + .get_or_insert_default(&server.id) + .await; + if cache.state != ServerState::Ok { + return Err(anyhow!( + "Cannot inspect network: server is {:?}", + cache.state + )); + } + periphery_client(&server)? 
+ .request(InspectNetwork { name: network }) + .await + } +} + +impl ResolveToString for State { async fn resolve_to_string( &self, - ListDockerContainers { server }: ListDockerContainers, + ListDockerImages { server }: ListDockerImages, user: User, ) -> anyhow::Result { let server = resource::get_check_permissions::( @@ -389,8 +579,8 @@ impl ResolveToString for State { let cache = server_status_cache() .get_or_insert_default(&server.id) .await; - if let Some(containers) = &cache.containers { - serde_json::to_string(containers) + if let Some(images) = &cache.images { + serde_json::to_string(images) .context("failed to serialize response") } else { Ok(String::from("[]")) @@ -398,6 +588,111 @@ impl ResolveToString for State { } } +impl Resolve for State { + async fn resolve( + &self, + InspectDockerImage { server, image }: InspectDockerImage, + user: User, + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Read, + ) + .await?; + let cache = server_status_cache() + .get_or_insert_default(&server.id) + .await; + if cache.state != ServerState::Ok { + return Err(anyhow!( + "Cannot inspect image: server is {:?}", + cache.state + )); + } + periphery_client(&server)? + .request(InspectImage { name: image }) + .await + } +} + +impl Resolve for State { + async fn resolve( + &self, + ListDockerImageHistory { server, image }: ListDockerImageHistory, + user: User, + ) -> anyhow::Result> { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Read, + ) + .await?; + let cache = server_status_cache() + .get_or_insert_default(&server.id) + .await; + if cache.state != ServerState::Ok { + return Err(anyhow!( + "Cannot get image history: server is {:?}", + cache.state + )); + } + periphery_client(&server)? + .request(ImageHistory { name: image }) + .await + } +} + +impl ResolveToString for State { + async fn resolve_to_string( + &self, + ListDockerVolumes { server }: ListDockerVolumes, + user: User, + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Read, + ) + .await?; + let cache = server_status_cache() + .get_or_insert_default(&server.id) + .await; + if let Some(volumes) = &cache.volumes { + serde_json::to_string(volumes) + .context("failed to serialize response") + } else { + Ok(String::from("[]")) + } + } +} + +impl Resolve for State { + async fn resolve( + &self, + InspectDockerVolume { server, volume }: InspectDockerVolume, + user: User, + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Read, + ) + .await?; + let cache = server_status_cache() + .get_or_insert_default(&server.id) + .await; + if cache.state != ServerState::Ok { + return Err(anyhow!( + "Cannot inspect volume: server is {:?}", + cache.state + )); + } + periphery_client(&server)? 
+ .request(InspectVolume { name: volume }) + .await + } +} + impl ResolveToString for State { async fn resolve_to_string( &self, diff --git a/bin/core/src/api/read/server_template.rs b/bin/core/src/api/read/server_template.rs index 7efe470aa..da0830bf9 100644 --- a/bin/core/src/api/read/server_template.rs +++ b/bin/core/src/api/read/server_template.rs @@ -1,6 +1,6 @@ use anyhow::Context; use mongo_indexed::Document; -use monitor_client::{ +use komodo_client::{ api::read::*, entities::{ permission::PermissionLevel, server_template::ServerTemplate, diff --git a/bin/core/src/api/read/stack.rs b/bin/core/src/api/read/stack.rs index be7903156..2e9647ea4 100644 --- a/bin/core/src/api/read/stack.rs +++ b/bin/core/src/api/read/stack.rs @@ -1,7 +1,7 @@ use std::collections::HashSet; use anyhow::Context; -use monitor_client::{ +use komodo_client::{ api::read::*, entities::{ config::core::CoreConfig, diff --git a/bin/core/src/api/read/sync.rs b/bin/core/src/api/read/sync.rs index e56909ae5..80a7c9c6e 100644 --- a/bin/core/src/api/read/sync.rs +++ b/bin/core/src/api/read/sync.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use monitor_client::{ +use komodo_client::{ api::read::*, entities::{ config::core::CoreConfig, diff --git a/bin/core/src/api/read/tag.rs b/bin/core/src/api/read/tag.rs index a56b54570..ba4fc3e2a 100644 --- a/bin/core/src/api/read/tag.rs +++ b/bin/core/src/api/read/tag.rs @@ -1,6 +1,6 @@ use anyhow::Context; use mongo_indexed::doc; -use monitor_client::{ +use komodo_client::{ api::read::{GetTag, ListTags}, entities::{tag::Tag, user::User}, }; diff --git a/bin/core/src/api/read/toml.rs b/bin/core/src/api/read/toml.rs index 2930e0a2d..4d3cee7bf 100644 --- a/bin/core/src/api/read/toml.rs +++ b/bin/core/src/api/read/toml.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use anyhow::Context; -use monitor_client::{ +use komodo_client::{ api::{ execute::Execution, read::{ @@ -30,8 +30,8 @@ use monitor_client::{ toml::{ PermissionToml, ResourceToml, ResourcesToml, UserGroupToml, }, - update::ResourceTarget, user::User, + ResourceTarget, }, }; use mungos::find::find_collect; @@ -42,7 +42,7 @@ use serde_json::Value; use crate::{ helpers::query::get_user_user_group_ids, - resource::{self, MonitorResource}, + resource::{self, KomodoResource}, state::{db_client, State}, }; @@ -355,7 +355,15 @@ impl Resolve for State { res.variables = find_collect(&db_client().await.variables, None, None) .await - .context("failed to get variables from db")?; + .context("failed to get variables from db")? 
+ .into_iter() + .map(|mut variable| { + if !user.admin && variable.is_secret { + variable.value = "#".repeat(variable.value.len()) + } + variable + }) + .collect(); } let toml = serialize_resources_toml(&res) @@ -399,7 +407,7 @@ async fn add_procedure( .get(&exec.deployment) .unwrap_or(&String::new()), ), - Execution::StartContainer(exec) => { + Execution::StartDeployment(exec) => { exec.deployment.clone_from( names .deployments @@ -407,7 +415,7 @@ async fn add_procedure( .unwrap_or(&String::new()), ) } - Execution::RestartContainer(exec) => { + Execution::RestartDeployment(exec) => { exec.deployment.clone_from( names .deployments @@ -415,7 +423,7 @@ async fn add_procedure( .unwrap_or(&String::new()), ) } - Execution::PauseContainer(exec) => { + Execution::PauseDeployment(exec) => { exec.deployment.clone_from( names .deployments @@ -423,7 +431,7 @@ async fn add_procedure( .unwrap_or(&String::new()), ) } - Execution::UnpauseContainer(exec) => { + Execution::UnpauseDeployment(exec) => { exec.deployment.clone_from( names .deployments @@ -431,13 +439,15 @@ async fn add_procedure( .unwrap_or(&String::new()), ) } - Execution::StopContainer(exec) => exec.deployment.clone_from( - names - .deployments - .get(&exec.deployment) - .unwrap_or(&String::new()), - ), - Execution::RemoveContainer(exec) => { + Execution::StopDeployment(exec) => { + exec.deployment.clone_from( + names + .deployments + .get(&exec.deployment) + .unwrap_or(&String::new()), + ) + } + Execution::DestroyDeployment(exec) => { exec.deployment.clone_from( names .deployments @@ -457,16 +467,69 @@ async fn add_procedure( Execution::CancelRepoBuild(exec) => exec.repo.clone_from( names.repos.get(&exec.repo).unwrap_or(&String::new()), ), + Execution::StartContainer(exec) => exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ), + Execution::RestartContainer(exec) => exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ), + Execution::PauseContainer(exec) => exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ), + Execution::UnpauseContainer(exec) => exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ), + Execution::StopContainer(exec) => exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ), + Execution::DestroyContainer(exec) => exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ), + Execution::StartAllContainers(exec) => { + exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ) + } + Execution::RestartAllContainers(exec) => { + exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ) + } + Execution::PauseAllContainers(exec) => { + exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ) + } + Execution::UnpauseAllContainers(exec) => { + exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ) + } Execution::StopAllContainers(exec) => exec.server.clone_from( names.servers.get(&exec.server).unwrap_or(&String::new()), ), + Execution::PruneContainers(exec) => exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ), + Execution::DeleteNetwork(exec) => exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ), Execution::PruneNetworks(exec) => exec.server.clone_from( names.servers.get(&exec.server).unwrap_or(&String::new()), ), + 
Execution::DeleteImage(exec) => exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ), Execution::PruneImages(exec) => exec.server.clone_from( names.servers.get(&exec.server).unwrap_or(&String::new()), ), - Execution::PruneContainers(exec) => exec.server.clone_from( + Execution::DeleteVolume(exec) => exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ), + Execution::PruneVolumes(exec) => exec.server.clone_from( + names.servers.get(&exec.server).unwrap_or(&String::new()), + ), + Execution::PruneSystem(exec) => exec.server.clone_from( names.servers.get(&exec.server).unwrap_or(&String::new()), ), Execution::RunSync(exec) => exec.sync.clone_from( @@ -679,7 +742,7 @@ async fn add_user_groups( Ok(()) } -fn convert_resource( +fn convert_resource( resource: Resource, tag_names: &HashMap, ) -> ResourceToml { diff --git a/bin/core/src/api/read/update.rs b/bin/core/src/api/read/update.rs index ad8ccc4b6..9aa308246 100644 --- a/bin/core/src/api/read/update.rs +++ b/bin/core/src/api/read/update.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use anyhow::{anyhow, Context}; -use monitor_client::{ +use komodo_client::{ api::read::{GetUpdate, ListUpdates, ListUpdatesResponse}, entities::{ alerter::Alerter, @@ -15,8 +15,9 @@ use monitor_client::{ server_template::ServerTemplate, stack::Stack, sync::ResourceSync, - update::{ResourceTarget, Update, UpdateListItem}, + update::{Update, UpdateListItem}, user::User, + ResourceTarget, }, }; use mungos::{ diff --git a/bin/core/src/api/read/user.rs b/bin/core/src/api/read/user.rs index a28ba2bb9..f38c69cb5 100644 --- a/bin/core/src/api/read/user.rs +++ b/bin/core/src/api/read/user.rs @@ -1,5 +1,5 @@ use anyhow::{anyhow, Context}; -use monitor_client::{ +use komodo_client::{ api::read::{ FindUser, FindUserResponse, GetUsername, GetUsernameResponse, ListApiKeys, ListApiKeysForServiceUser, diff --git a/bin/core/src/api/read/user_group.rs b/bin/core/src/api/read/user_group.rs index 5bdf447e5..4cb3aabde 100644 --- a/bin/core/src/api/read/user_group.rs +++ b/bin/core/src/api/read/user_group.rs @@ -1,7 +1,7 @@ use std::str::FromStr; use anyhow::Context; -use monitor_client::{ +use komodo_client::{ api::read::{ GetUserGroup, GetUserGroupResponse, ListUserGroups, ListUserGroupsResponse, diff --git a/bin/core/src/api/read/variable.rs b/bin/core/src/api/read/variable.rs index d4900b1cc..90acfc77b 100644 --- a/bin/core/src/api/read/variable.rs +++ b/bin/core/src/api/read/variable.rs @@ -1,6 +1,6 @@ use anyhow::Context; use mongo_indexed::doc; -use monitor_client::{ +use komodo_client::{ api::read::{ GetVariable, GetVariableResponse, ListVariables, ListVariablesResponse, @@ -19,9 +19,14 @@ impl Resolve for State { async fn resolve( &self, GetVariable { name }: GetVariable, - _: User, + user: User, ) -> anyhow::Result { - get_variable(&name).await + let mut variable = get_variable(&name).await?; + if !variable.is_secret || user.admin { + return Ok(variable); + } + variable.value = "#".repeat(variable.value.len()); + Ok(variable) } } @@ -29,14 +34,27 @@ impl Resolve for State { async fn resolve( &self, ListVariables {}: ListVariables, - _: User, + user: User, ) -> anyhow::Result { - find_collect( + let variables = find_collect( &db_client().await.variables, None, FindOptions::builder().sort(doc! 
{ "name": 1 }).build(), ) .await - .context("failed to query db for variables") + .context("failed to query db for variables")?; + if user.admin { + return Ok(variables); + } + let variables = variables + .into_iter() + .map(|mut variable| { + if variable.is_secret { + variable.value = "#".repeat(variable.value.len()); + } + variable + }) + .collect(); + Ok(variables) } } diff --git a/bin/core/src/api/user.rs b/bin/core/src/api/user.rs index af76ec0ce..c6372dfde 100644 --- a/bin/core/src/api/user.rs +++ b/bin/core/src/api/user.rs @@ -3,16 +3,16 @@ use std::{collections::VecDeque, time::Instant}; use anyhow::{anyhow, Context}; use axum::{middleware, routing::post, Extension, Json, Router}; use axum_extra::{headers::ContentType, TypedHeader}; -use mongo_indexed::doc; -use monitor_client::{ +use komodo_client::{ api::user::{ CreateApiKey, CreateApiKeyResponse, DeleteApiKey, DeleteApiKeyResponse, PushRecentlyViewed, PushRecentlyViewedResponse, SetLastSeenUpdate, SetLastSeenUpdateResponse, }, - entities::{api_key::ApiKey, monitor_timestamp, user::User}, + entities::{api_key::ApiKey, komodo_timestamp, user::User}, }; +use mongo_indexed::doc; use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson}; use resolver_api::{derive::Resolver, Resolve, Resolver}; use serde::{Deserialize, Serialize}; @@ -132,7 +132,7 @@ impl Resolve for State { &db_client().await.users, &user.id, mungos::update::Update::Set(doc! { - "last_update_view": monitor_timestamp() + "last_update_view": komodo_timestamp() }), None, ) @@ -168,7 +168,7 @@ impl Resolve for State { key: key.clone(), secret: secret_hash, user_id: user.id.clone(), - created_at: monitor_timestamp(), + created_at: komodo_timestamp(), expires, }; db_client() diff --git a/bin/core/src/api/write/alerter.rs b/bin/core/src/api/write/alerter.rs index 4e881a92d..0182170d2 100644 --- a/bin/core/src/api/write/alerter.rs +++ b/bin/core/src/api/write/alerter.rs @@ -1,4 +1,4 @@ -use monitor_client::{ +use komodo_client::{ api::write::{ CopyAlerter, CreateAlerter, DeleteAlerter, UpdateAlerter, }, diff --git a/bin/core/src/api/write/build.rs b/bin/core/src/api/write/build.rs index 3a40282ae..948b7b5cd 100644 --- a/bin/core/src/api/write/build.rs +++ b/bin/core/src/api/write/build.rs @@ -1,6 +1,6 @@ use anyhow::{anyhow, Context}; use mongo_indexed::doc; -use monitor_client::{ +use komodo_client::{ api::write::*, entities::{ build::{Build, BuildInfo, PartialBuildConfig}, @@ -131,6 +131,7 @@ impl Resolve for State { &[], "", None, + &[], ) .await .context("failed to clone build repo")?; diff --git a/bin/core/src/api/write/builder.rs b/bin/core/src/api/write/builder.rs index 2290e80fe..31b469d8a 100644 --- a/bin/core/src/api/write/builder.rs +++ b/bin/core/src/api/write/builder.rs @@ -1,4 +1,4 @@ -use monitor_client::{ +use komodo_client::{ api::write::*, entities::{ builder::Builder, permission::PermissionLevel, user::User, diff --git a/bin/core/src/api/write/deployment.rs b/bin/core/src/api/write/deployment.rs index 36031c24d..c7dea9cca 100644 --- a/bin/core/src/api/write/deployment.rs +++ b/bin/core/src/api/write/deployment.rs @@ -1,12 +1,12 @@ use anyhow::{anyhow, Context}; -use monitor_client::{ +use komodo_client::{ api::write::*, entities::{ deployment::{Deployment, DeploymentState}, - monitor_timestamp, + komodo_timestamp, permission::PermissionLevel, server::Server, - to_monitor_name, + to_komodo_name, update::Update, user::User, Operation, @@ -102,7 +102,7 @@ impl Resolve for State { let _action_guard = action_state.update(|state| state.renaming = true)?; - 
let name = to_monitor_name(&name); + let name = to_komodo_name(&name); let container_state = get_deployment_state(&deployment).await?; @@ -119,7 +119,7 @@ impl Resolve for State { &db_client().await.deployments, &deployment.id, mungos::update::Update::Set( - doc! { "name": &name, "updated_at": monitor_timestamp() }, + doc! { "name": &name, "updated_at": komodo_timestamp() }, ), None, ) diff --git a/bin/core/src/api/write/description.rs b/bin/core/src/api/write/description.rs index 74c06ca1d..fd777c2fe 100644 --- a/bin/core/src/api/write/description.rs +++ b/bin/core/src/api/write/description.rs @@ -1,11 +1,11 @@ use anyhow::anyhow; -use monitor_client::{ +use komodo_client::{ api::write::{UpdateDescription, UpdateDescriptionResponse}, entities::{ alerter::Alerter, build::Build, builder::Builder, deployment::Deployment, procedure::Procedure, repo::Repo, server::Server, server_template::ServerTemplate, stack::Stack, - sync::ResourceSync, update::ResourceTarget, user::User, + sync::ResourceSync, user::User, ResourceTarget, }, }; use resolver_api::Resolve; diff --git a/bin/core/src/api/write/mod.rs b/bin/core/src/api/write/mod.rs index f93591ff1..ab22bece6 100644 --- a/bin/core/src/api/write/mod.rs +++ b/bin/core/src/api/write/mod.rs @@ -4,7 +4,7 @@ use anyhow::{anyhow, Context}; use axum::{middleware, routing::post, Extension, Router}; use axum_extra::{headers::ContentType, TypedHeader}; use derive_variants::{EnumVariants, ExtractVariant}; -use monitor_client::{api::write::*, entities::user::User}; +use komodo_client::{api::write::*, entities::user::User}; use resolver_api::{derive::Resolver, Resolver}; use serde::{Deserialize, Serialize}; use serror::Json; @@ -68,7 +68,6 @@ pub enum WriteRequest { UpdateServer(UpdateServer), RenameServer(RenameServer), CreateNetwork(CreateNetwork), - DeleteNetwork(DeleteNetwork), // ==== DEPLOYMENT ==== CreateDeployment(CreateDeployment), @@ -148,6 +147,7 @@ pub enum WriteRequest { CreateVariable(CreateVariable), UpdateVariableValue(UpdateVariableValue), UpdateVariableDescription(UpdateVariableDescription), + UpdateVariableIsSecret(UpdateVariableIsSecret), DeleteVariable(DeleteVariable), // ==== PROVIDERS ==== diff --git a/bin/core/src/api/write/permissions.rs b/bin/core/src/api/write/permissions.rs index 0f9e86467..2614b7d3f 100644 --- a/bin/core/src/api/write/permissions.rs +++ b/bin/core/src/api/write/permissions.rs @@ -1,7 +1,7 @@ use std::str::FromStr; use anyhow::{anyhow, Context}; -use monitor_client::{ +use komodo_client::{ api::write::{ UpdatePermissionOnResourceType, UpdatePermissionOnResourceTypeResponse, UpdatePermissionOnTarget, @@ -10,8 +10,8 @@ use monitor_client::{ }, entities::{ permission::{UserTarget, UserTargetVariant}, - update::{ResourceTarget, ResourceTargetVariant}, user::User, + ResourceTarget, ResourceTargetVariant, }, }; use mungos::{ diff --git a/bin/core/src/api/write/procedure.rs b/bin/core/src/api/write/procedure.rs index f1d6cb5ed..d0549dd45 100644 --- a/bin/core/src/api/write/procedure.rs +++ b/bin/core/src/api/write/procedure.rs @@ -1,4 +1,4 @@ -use monitor_client::{ +use komodo_client::{ api::write::*, entities::{ permission::PermissionLevel, procedure::Procedure, user::User, diff --git a/bin/core/src/api/write/provider.rs b/bin/core/src/api/write/provider.rs index 246e13217..5b07ebf99 100644 --- a/bin/core/src/api/write/provider.rs +++ b/bin/core/src/api/write/provider.rs @@ -1,11 +1,10 @@ use anyhow::{anyhow, Context}; -use monitor_client::{ +use komodo_client::{ api::write::*, entities::{ 
provider::{DockerRegistryAccount, GitProviderAccount}, - update::ResourceTarget, user::User, - Operation, + Operation, ResourceTarget, }, }; use mungos::{ diff --git a/bin/core/src/api/write/repo.rs b/bin/core/src/api/write/repo.rs index 1783f1092..546679447 100644 --- a/bin/core/src/api/write/repo.rs +++ b/bin/core/src/api/write/repo.rs @@ -1,6 +1,6 @@ use anyhow::{anyhow, Context}; use mongo_indexed::doc; -use monitor_client::{ +use komodo_client::{ api::write::*, entities::{ config::core::CoreConfig, @@ -134,6 +134,7 @@ impl Resolve for State { &[], "", None, + &[], ) .await .context("failed to clone repo (the resource) repo")?; diff --git a/bin/core/src/api/write/server.rs b/bin/core/src/api/write/server.rs index 2179f19a2..c6422989d 100644 --- a/bin/core/src/api/write/server.rs +++ b/bin/core/src/api/write/server.rs @@ -1,9 +1,9 @@ use anyhow::Context; use formatting::format_serror; -use monitor_client::{ +use komodo_client::{ api::write::*, entities::{ - monitor_timestamp, + komodo_timestamp, permission::PermissionLevel, server::Server, update::{Update, UpdateStatus}, @@ -73,7 +73,7 @@ impl Resolve for State { let mut update = make_update(&server, Operation::RenameServer, &user); - update_one_by_id(&db_client().await.servers, &id, mungos::update::Update::Set(doc! { "name": &name, "updated_at": monitor_timestamp() }), None) + update_one_by_id(&db_client().await.servers, &id, mungos::update::Update::Set(doc! { "name": &name, "updated_at": komodo_timestamp() }), None) .await .context("failed to update server on db. this name may already be taken.")?; update.push_simple_log( @@ -124,42 +124,3 @@ impl Resolve for State { Ok(update) } } - -impl Resolve for State { - #[instrument(name = "DeleteNetwork", skip(self, user))] - async fn resolve( - &self, - DeleteNetwork { server, name }: DeleteNetwork, - user: User, - ) -> anyhow::Result { - let server = resource::get_check_permissions::( - &server, - &user, - PermissionLevel::Write, - ) - .await?; - - let periphery = periphery_client(&server)?; - - let mut update = - make_update(&server, Operation::DeleteNetwork, &user); - update.status = UpdateStatus::InProgress; - update.id = add_update(update.clone()).await?; - - match periphery - .request(api::network::DeleteNetwork { name }) - .await - { - Ok(log) => update.logs.push(log), - Err(e) => update.push_error_log( - "delete network", - format_serror(&e.context("failed to delete network").into()), - ), - }; - - update.finalize(); - update_update(update.clone()).await?; - - Ok(update) - } -} diff --git a/bin/core/src/api/write/server_template.rs b/bin/core/src/api/write/server_template.rs index cd9e2d654..016470b15 100644 --- a/bin/core/src/api/write/server_template.rs +++ b/bin/core/src/api/write/server_template.rs @@ -1,4 +1,4 @@ -use monitor_client::{ +use komodo_client::{ api::write::{ CopyServerTemplate, CreateServerTemplate, DeleteServerTemplate, UpdateServerTemplate, diff --git a/bin/core/src/api/write/service_user.rs b/bin/core/src/api/write/service_user.rs index d1f09c7e1..adfeeb050 100644 --- a/bin/core/src/api/write/service_user.rs +++ b/bin/core/src/api/write/service_user.rs @@ -1,7 +1,7 @@ use std::str::FromStr; use anyhow::{anyhow, Context}; -use monitor_client::{ +use komodo_client::{ api::{ user::CreateApiKey, write::{ @@ -13,7 +13,7 @@ use monitor_client::{ }, }, entities::{ - monitor_timestamp, + komodo_timestamp, user::{User, UserConfig}, }, }; @@ -53,7 +53,7 @@ impl Resolve for State { last_update_view: 0, recents: Default::default(), all: Default::default(), - updated_at: 
monitor_timestamp(), + updated_at: komodo_timestamp(), }; user.id = db_client() .await diff --git a/bin/core/src/api/write/stack.rs b/bin/core/src/api/write/stack.rs index fc963e4e4..c5c62ecd9 100644 --- a/bin/core/src/api/write/stack.rs +++ b/bin/core/src/api/write/stack.rs @@ -1,10 +1,10 @@ use anyhow::{anyhow, Context}; use formatting::format_serror; -use monitor_client::{ +use komodo_client::{ api::write::*, entities::{ config::core::CoreConfig, - monitor_timestamp, + komodo_timestamp, permission::PermissionLevel, server::ServerState, stack::{ComposeContents, PartialStackConfig, Stack, StackInfo}, @@ -29,7 +29,7 @@ use crate::{ config::core_config, helpers::{ periphery_client, - query::get_server_with_status, + query::get_server_with_state, stack::{ remote::get_remote_compose_contents, services::extract_services_into_res, @@ -112,7 +112,7 @@ impl Resolve for State { &db_client().await.stacks, &stack.id, mungos::update::Update::Set( - doc! { "name": &name, "updated_at": monitor_timestamp() }, + doc! { "name": &name, "updated_at": komodo_timestamp() }, ), None, ) @@ -177,7 +177,7 @@ impl Resolve for State { (vec![], None, None, None, None) } else { let (server, status) = - get_server_with_status(&stack.config.server_id).await?; + get_server_with_state(&stack.config.server_id).await?; if status != ServerState::Ok { (vec![], None, None, None, None) } else { diff --git a/bin/core/src/api/write/sync.rs b/bin/core/src/api/write/sync.rs index 5caf71106..e9d2ac0cc 100644 --- a/bin/core/src/api/write/sync.rs +++ b/bin/core/src/api/write/sync.rs @@ -2,21 +2,21 @@ use std::collections::HashMap; use anyhow::{anyhow, Context}; use formatting::format_serror; -use monitor_client::{ +use komodo_client::{ api::write::*, entities::{ self, - alert::{Alert, AlertData}, + alert::{Alert, AlertData, SeverityLevel}, alerter::Alerter, build::Build, builder::Builder, config::core::CoreConfig, deployment::Deployment, - monitor_timestamp, + komodo_timestamp, permission::PermissionLevel, procedure::Procedure, repo::Repo, - server::{stats::SeverityLevel, Server}, + server::Server, server_template::ServerTemplate, stack::Stack, sync::{ @@ -24,7 +24,7 @@ use monitor_client::{ PendingSyncUpdatesData, PendingSyncUpdatesDataErr, PendingSyncUpdatesDataOk, ResourceSync, }, - update::ResourceTarget, + ResourceTarget, user::User, NoData, }, @@ -328,7 +328,7 @@ impl Resolve for State { (None, true) => { let alert = Alert { id: Default::default(), - ts: monitor_timestamp(), + ts: komodo_timestamp(), resolved: false, level: SeverityLevel::Ok, target: ResourceTarget::ResourceSync(id.clone()), @@ -351,7 +351,7 @@ impl Resolve for State { doc! 
{ "$set": { "resolved": true, - "resolved_ts": monitor_timestamp() + "resolved_ts": komodo_timestamp() } }, None, diff --git a/bin/core/src/api/write/tag.rs b/bin/core/src/api/write/tag.rs index a7e0d8593..f4dd1c01a 100644 --- a/bin/core/src/api/write/tag.rs +++ b/bin/core/src/api/write/tag.rs @@ -1,7 +1,7 @@ use std::str::FromStr; use anyhow::{anyhow, Context}; -use monitor_client::{ +use komodo_client::{ api::write::{ CreateTag, DeleteTag, RenameTag, UpdateTagsOnResource, UpdateTagsOnResourceResponse, @@ -11,7 +11,7 @@ use monitor_client::{ deployment::Deployment, permission::PermissionLevel, procedure::Procedure, repo::Repo, server::Server, server_template::ServerTemplate, stack::Stack, - sync::ResourceSync, tag::Tag, update::ResourceTarget, user::User, + sync::ResourceSync, tag::Tag, user::User, ResourceTarget, }, }; use mungos::{ @@ -59,6 +59,7 @@ impl Resolve for State { } impl Resolve for State { + #[instrument(name = "RenameTag", skip(self, user))] async fn resolve( &self, RenameTag { id, name }: RenameTag, diff --git a/bin/core/src/api/write/user_group.rs b/bin/core/src/api/write/user_group.rs index 0c0b61bbe..3d886d18c 100644 --- a/bin/core/src/api/write/user_group.rs +++ b/bin/core/src/api/write/user_group.rs @@ -1,12 +1,12 @@ use std::{collections::HashMap, str::FromStr}; use anyhow::{anyhow, Context}; -use monitor_client::{ +use komodo_client::{ api::write::{ AddUserToUserGroup, CreateUserGroup, DeleteUserGroup, RemoveUserFromUserGroup, RenameUserGroup, SetUsersInUserGroup, }, - entities::{monitor_timestamp, user::User, user_group::UserGroup}, + entities::{komodo_timestamp, user::User, user_group::UserGroup}, }; use mungos::{ by_id::{delete_one_by_id, find_one_by_id, update_one_by_id}, @@ -30,7 +30,7 @@ impl Resolve for State { id: Default::default(), users: Default::default(), all: Default::default(), - updated_at: monitor_timestamp(), + updated_at: komodo_timestamp(), name, }; let db = db_client().await; diff --git a/bin/core/src/api/write/variable.rs b/bin/core/src/api/write/variable.rs index 388e93b77..51eb269b3 100644 --- a/bin/core/src/api/write/variable.rs +++ b/bin/core/src/api/write/variable.rs @@ -1,13 +1,14 @@ use anyhow::{anyhow, Context}; -use monitor_client::{ +use komodo_client::{ api::write::{ CreateVariable, CreateVariableResponse, DeleteVariable, DeleteVariableResponse, UpdateVariableDescription, - UpdateVariableDescriptionResponse, UpdateVariableValue, + UpdateVariableDescriptionResponse, UpdateVariableIsSecret, + UpdateVariableIsSecretResponse, UpdateVariableValue, UpdateVariableValueResponse, }, entities::{ - update::ResourceTarget, user::User, variable::Variable, Operation, + user::User, variable::Variable, Operation, ResourceTarget, }, }; use mungos::mongodb::bson::doc; @@ -22,12 +23,14 @@ use crate::{ }; impl Resolve for State { + #[instrument(name = "CreateVariable", skip(self, user, value))] async fn resolve( &self, CreateVariable { name, value, description, + is_secret, }: CreateVariable, user: User, ) -> anyhow::Result { @@ -39,6 +42,7 @@ impl Resolve for State { name, value, description, + is_secret, }; db_client() @@ -65,6 +69,7 @@ impl Resolve for State { } impl Resolve for State { + #[instrument(name = "UpdateVariableValue", skip(self, user, value))] async fn resolve( &self, UpdateVariableValue { name, value }: UpdateVariableValue, @@ -96,13 +101,19 @@ impl Resolve for State { &user, ); - update.push_simple_log( - "update variable value", + let log = if variable.is_secret { + format!( + "variable: '{name}'\nfrom: {}\nto: {value}", + 
variable.value.replace(|_| true, "#") + ) + } else { format!( "variable: '{name}'\nfrom: {}\nto: {value}", variable.value - ), - ); + ) + }; + + update.push_simple_log("update variable value", log); update.finalize(); add_update(update).await?; @@ -112,6 +123,7 @@ impl Resolve for State { } impl Resolve for State { + #[instrument(name = "UpdateVariableDescription", skip(self, user))] async fn resolve( &self, UpdateVariableDescription { name, description }: UpdateVariableDescription, @@ -133,6 +145,29 @@ impl Resolve for State { } } +impl Resolve for State { + #[instrument(name = "UpdateVariableIsSecret", skip(self, user))] + async fn resolve( + &self, + UpdateVariableIsSecret { name, is_secret }: UpdateVariableIsSecret, + user: User, + ) -> anyhow::Result { + if !user.admin { + return Err(anyhow!("only admins can update variables")); + } + db_client() + .await + .variables + .update_one( + doc! { "name": &name }, + doc! { "$set": { "is_secret": is_secret } }, + ) + .await + .context("failed to update variable is secret on db")?; + get_variable(&name).await + } +} + impl Resolve for State { async fn resolve( &self, diff --git a/bin/core/src/auth/github/client.rs b/bin/core/src/auth/github/client.rs index 0afb90c39..00620e27c 100644 --- a/bin/core/src/auth/github/client.rs +++ b/bin/core/src/auth/github/client.rs @@ -1,7 +1,7 @@ use std::sync::OnceLock; use anyhow::{anyhow, Context}; -use monitor_client::entities::config::core::{ +use komodo_client::entities::config::core::{ CoreConfig, OauthCredentials, }; use reqwest::StatusCode; diff --git a/bin/core/src/auth/github/mod.rs b/bin/core/src/auth/github/mod.rs index 4688c8f64..214a6ee3e 100644 --- a/bin/core/src/auth/github/mod.rs +++ b/bin/core/src/auth/github/mod.rs @@ -3,8 +3,8 @@ use axum::{ extract::Query, response::Redirect, routing::get, Router, }; use mongo_indexed::Document; -use monitor_client::entities::{ - monitor_timestamp, +use komodo_client::entities::{ + komodo_timestamp, user::{User, UserConfig}, }; use mungos::mongodb::bson::doc; @@ -75,7 +75,7 @@ async fn callback( .generate(user.id) .context("failed to generate jwt")?, None => { - let ts = monitor_timestamp(); + let ts = komodo_timestamp(); let no_users_exist = db_client.users.find_one(Document::new()).await?.is_none(); let user = User { diff --git a/bin/core/src/auth/google/client.rs b/bin/core/src/auth/google/client.rs index 6dc2eb560..725fed665 100644 --- a/bin/core/src/auth/google/client.rs +++ b/bin/core/src/auth/google/client.rs @@ -2,7 +2,7 @@ use std::sync::OnceLock; use anyhow::{anyhow, Context}; use jwt::Token; -use monitor_client::entities::config::core::{ +use komodo_client::entities::config::core::{ CoreConfig, OauthCredentials, }; use reqwest::StatusCode; @@ -73,7 +73,7 @@ impl GoogleOauthClient { client_id: id.clone(), client_secret: secret.clone(), redirect_uri: format!("{host}/auth/google/callback"), - user_agent: String::from("monitor"), + user_agent: String::from("komodo"), states: Default::default(), scopes, } diff --git a/bin/core/src/auth/google/mod.rs b/bin/core/src/auth/google/mod.rs index 36bf323f5..b9252e74f 100644 --- a/bin/core/src/auth/google/mod.rs +++ b/bin/core/src/auth/google/mod.rs @@ -4,7 +4,7 @@ use axum::{ extract::Query, response::Redirect, routing::get, Router, }; use mongo_indexed::Document; -use monitor_client::entities::user::{User, UserConfig}; +use komodo_client::entities::user::{User, UserConfig}; use mungos::mongodb::bson::doc; use reqwest::StatusCode; use serde::Deserialize; diff --git a/bin/core/src/auth/jwt.rs 
b/bin/core/src/auth/jwt.rs index b9b5a05a1..1b4a51db3 100644 --- a/bin/core/src/auth/jwt.rs +++ b/bin/core/src/auth/jwt.rs @@ -6,7 +6,7 @@ use async_timing_util::{ }; use hmac::{Hmac, Mac}; use jwt::SignWithKey; -use monitor_client::entities::config::core::CoreConfig; +use komodo_client::entities::config::core::CoreConfig; use mungos::mongodb::bson::doc; use serde::{Deserialize, Serialize}; use sha2::Sha256; diff --git a/bin/core/src/auth/local.rs b/bin/core/src/auth/local.rs index 0969ba5f3..4c16f2a24 100644 --- a/bin/core/src/auth/local.rs +++ b/bin/core/src/auth/local.rs @@ -4,7 +4,7 @@ use anyhow::{anyhow, Context}; use async_timing_util::unix_timestamp_ms; use axum::http::HeaderMap; use mongo_indexed::Document; -use monitor_client::{ +use komodo_client::{ api::auth::{ CreateLocalUser, CreateLocalUserResponse, LoginLocalUser, LoginLocalUserResponse, diff --git a/bin/core/src/auth/mod.rs b/bin/core/src/auth/mod.rs index 9cfe5f35c..229368fcd 100644 --- a/bin/core/src/auth/mod.rs +++ b/bin/core/src/auth/mod.rs @@ -5,7 +5,7 @@ use axum::{ extract::Request, http::HeaderMap, middleware::Next, response::Response, }; -use monitor_client::entities::{monitor_timestamp, user::User}; +use komodo_client::entities::{komodo_timestamp, user::User}; use mungos::mongodb::bson::doc; use reqwest::StatusCode; use serde::Deserialize; @@ -122,7 +122,7 @@ pub async fn auth_api_key_get_user_id( .await .context("failed to query db")? .context("no api key matching key")?; - if key.expires != 0 && key.expires < monitor_timestamp() { + if key.expires != 0 && key.expires < komodo_timestamp() { return Err(anyhow!("api key expired")); } if bcrypt::verify(secret, &key.secret) diff --git a/bin/core/src/cloud/aws/ec2.rs b/bin/core/src/cloud/aws/ec2.rs index add5cee13..bcd91a88b 100644 --- a/bin/core/src/cloud/aws/ec2.rs +++ b/bin/core/src/cloud/aws/ec2.rs @@ -12,12 +12,11 @@ use aws_sdk_ec2::{ Client, }; use base64::Engine; -use monitor_client::entities::{ - alert::{Alert, AlertData}, - monitor_timestamp, - server::stats::SeverityLevel, +use komodo_client::entities::{ + alert::{Alert, AlertData, SeverityLevel}, + komodo_timestamp, server_template::aws::AwsServerTemplateConfig, - update::ResourceTarget, + ResourceTarget, }; use crate::{config::core_config, helpers::alert::send_alerts}; @@ -171,7 +170,7 @@ pub async fn terminate_ec2_instance_with_retry( error!("failed to terminate aws instance {instance_id}."); let alert = Alert { id: Default::default(), - ts: monitor_timestamp(), + ts: komodo_timestamp(), resolved: false, level: SeverityLevel::Critical, target: ResourceTarget::system(), diff --git a/bin/core/src/cloud/hetzner/common.rs b/bin/core/src/cloud/hetzner/common.rs index d8213da69..8c0b91096 100644 --- a/bin/core/src/cloud/hetzner/common.rs +++ b/bin/core/src/cloud/hetzner/common.rs @@ -162,6 +162,8 @@ pub enum HetznerLocation { Ashburn, #[serde(rename = "hil")] Hillsboro, + #[serde(rename = "sin")] + Singapore, } #[derive(Debug, Clone, Copy, Serialize, Deserialize)] @@ -176,6 +178,8 @@ pub enum HetznerDatacenter { AshburnDc1, #[serde(rename = "hil-dc1")] HillsboroDc1, + #[serde(rename = "sin-dc1")] + SingaporeDc1, } impl From for HetznerLocation { @@ -188,6 +192,7 @@ impl From for HetznerLocation { } HetznerDatacenter::AshburnDc1 => HetznerLocation::Ashburn, HetznerDatacenter::HillsboroDc1 => HetznerLocation::Hillsboro, + HetznerDatacenter::SingaporeDc1 => HetznerLocation::Singapore, } } } diff --git a/bin/core/src/cloud/hetzner/mod.rs b/bin/core/src/cloud/hetzner/mod.rs index c63e9a61d..7932938ec 100644 --- 
a/bin/core/src/cloud/hetzner/mod.rs +++ b/bin/core/src/cloud/hetzner/mod.rs @@ -5,7 +5,7 @@ use std::{ use anyhow::{anyhow, Context}; use futures::future::join_all; -use monitor_client::entities::server_template::hetzner::{ +use komodo_client::entities::server_template::hetzner::{ HetznerDatacenter, HetznerServerTemplateConfig, HetznerServerType, HetznerVolumeFormat, }; @@ -209,6 +209,9 @@ fn hetzner_datacenter( HetznerDatacenter::HillsboroDc1 => { common::HetznerDatacenter::HillsboroDc1 } + HetznerDatacenter::SingaporeDc1 => { + common::HetznerDatacenter::SingaporeDc1 + } } } diff --git a/bin/core/src/config.rs b/bin/core/src/config.rs index 484732709..4f4318626 100644 --- a/bin/core/src/config.rs +++ b/bin/core/src/config.rs @@ -2,7 +2,7 @@ use std::sync::OnceLock; use anyhow::Context; use merge_config_files::parse_config_file; -use monitor_client::entities::{ +use komodo_client::entities::{ config::core::{ AwsCredentials, CoreConfig, Env, GithubWebhookAppConfig, GithubWebhookAppInstallationConfig, HetznerCredentials, @@ -16,7 +16,7 @@ pub fn frontend_path() -> &'static String { #[derive(Deserialize)] struct FrontendEnv { #[serde(default = "default_frontend_path")] - monitor_frontend_path: String, + komodo_frontend_path: String, } fn default_frontend_path() -> String { @@ -26,11 +26,11 @@ pub fn frontend_path() -> &'static String { static FRONTEND_PATH: OnceLock = OnceLock::new(); FRONTEND_PATH.get_or_init(|| { let FrontendEnv { - monitor_frontend_path, + komodo_frontend_path, } = envy::from_env() .context("failed to parse FrontendEnv") .unwrap(); - monitor_frontend_path + komodo_frontend_path }) } @@ -44,16 +44,16 @@ pub fn core_config() -> &'static CoreConfig { panic!("{e:#?}"); } }; - let config_path = &env.monitor_config_path; + let config_path = &env.komodo_config_path; let config = parse_config_file::(config_path.as_str()) .unwrap_or_else(|e| { panic!("failed at parsing config at {config_path} | {e:#}") }); - let installations = match (env.monitor_github_webhook_app_installations_ids, env.monitor_github_webhook_app_installations_namespaces) { + let installations = match (env.komodo_github_webhook_app_installations_ids, env.komodo_github_webhook_app_installations_namespaces) { (Some(ids), Some(namespaces)) => { if ids.len() != namespaces.len() { - panic!("MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS length and MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES length mismatch. Got {ids:?} and {namespaces:?}") + panic!("KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS length and KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES length mismatch. Got {ids:?} and {namespaces:?}") } ids .into_iter() @@ -65,7 +65,7 @@ pub fn core_config() -> &'static CoreConfig { .collect() }, (Some(_), None) | (None, Some(_)) => { - panic!("Got only one of MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS or MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES, both MUST be provided"); + panic!("Got only one of KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS or KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES, both MUST be provided"); } (None, None) => { config.github_webhook_app.installations @@ -73,129 +73,129 @@ pub fn core_config() -> &'static CoreConfig { }; // recreating CoreConfig here makes sure we apply all env overrides. 
CoreConfig { - title: env.monitor_title.unwrap_or(config.title), - host: env.monitor_host.unwrap_or(config.host), - port: env.monitor_port.unwrap_or(config.port), - passkey: env.monitor_passkey.unwrap_or(config.passkey), - ensure_server: env.monitor_ensure_server.unwrap_or(config.ensure_server), - jwt_secret: env.monitor_jwt_secret.unwrap_or(config.jwt_secret), + title: env.komodo_title.unwrap_or(config.title), + host: env.komodo_host.unwrap_or(config.host), + port: env.komodo_port.unwrap_or(config.port), + passkey: env.komodo_passkey.unwrap_or(config.passkey), + ensure_server: env.komodo_ensure_server.unwrap_or(config.ensure_server), + jwt_secret: env.komodo_jwt_secret.unwrap_or(config.jwt_secret), jwt_ttl: env - .monitor_jwt_ttl + .komodo_jwt_ttl .unwrap_or(config.jwt_ttl), repo_directory: env - .monitor_repo_directory + .komodo_repo_directory .map(|dir| dir.parse() - .context("failed to parse env MONITOR_REPO_DIRECTORY as valid path").unwrap()) + .context("failed to parse env komodo_REPO_DIRECTORY as valid path").unwrap()) .unwrap_or(config.repo_directory), stack_poll_interval: env - .monitor_stack_poll_interval + .komodo_stack_poll_interval .unwrap_or(config.stack_poll_interval), sync_poll_interval: env - .monitor_sync_poll_interval + .komodo_sync_poll_interval .unwrap_or(config.sync_poll_interval), build_poll_interval: env - .monitor_build_poll_interval + .komodo_build_poll_interval .unwrap_or(config.build_poll_interval), repo_poll_interval: env - .monitor_repo_poll_interval + .komodo_repo_poll_interval .unwrap_or(config.repo_poll_interval), monitoring_interval: env - .monitor_monitoring_interval + .komodo_monitoring_interval .unwrap_or(config.monitoring_interval), keep_stats_for_days: env - .monitor_keep_stats_for_days + .komodo_keep_stats_for_days .unwrap_or(config.keep_stats_for_days), keep_alerts_for_days: env - .monitor_keep_alerts_for_days + .komodo_keep_alerts_for_days .unwrap_or(config.keep_alerts_for_days), webhook_secret: env - .monitor_webhook_secret + .komodo_webhook_secret .unwrap_or(config.webhook_secret), webhook_base_url: env - .monitor_webhook_base_url + .komodo_webhook_base_url .or(config.webhook_base_url), transparent_mode: env - .monitor_transparent_mode + .komodo_transparent_mode .unwrap_or(config.transparent_mode), ui_write_disabled: env - .monitor_ui_write_disabled + .komodo_ui_write_disabled .unwrap_or(config.ui_write_disabled), - enable_new_users: env.monitor_enable_new_users + enable_new_users: env.komodo_enable_new_users .unwrap_or(config.enable_new_users), - local_auth: env.monitor_local_auth.unwrap_or(config.local_auth), + local_auth: env.komodo_local_auth.unwrap_or(config.local_auth), google_oauth: OauthCredentials { enabled: env - .monitor_google_oauth_enabled + .komodo_google_oauth_enabled .unwrap_or(config.google_oauth.enabled), id: env - .monitor_google_oauth_id + .komodo_google_oauth_id .unwrap_or(config.google_oauth.id), secret: env - .monitor_google_oauth_secret + .komodo_google_oauth_secret .unwrap_or(config.google_oauth.secret), }, github_oauth: OauthCredentials { enabled: env - .monitor_github_oauth_enabled + .komodo_github_oauth_enabled .unwrap_or(config.github_oauth.enabled), id: env - .monitor_github_oauth_id + .komodo_github_oauth_id .unwrap_or(config.github_oauth.id), secret: env - .monitor_github_oauth_secret + .komodo_github_oauth_secret .unwrap_or(config.github_oauth.secret), }, github_webhook_app: GithubWebhookAppConfig { app_id: env - .monitor_github_webhook_app_app_id + .komodo_github_webhook_app_app_id 
.unwrap_or(config.github_webhook_app.app_id), pk_path: env - .monitor_github_webhook_app_pk_path + .komodo_github_webhook_app_pk_path .unwrap_or(config.github_webhook_app.pk_path), installations, }, aws: AwsCredentials { access_key_id: env - .monitor_aws_access_key_id + .komodo_aws_access_key_id .unwrap_or(config.aws.access_key_id), secret_access_key: env - .monitor_aws_secret_access_key + .komodo_aws_secret_access_key .unwrap_or(config.aws.secret_access_key), }, hetzner: HetznerCredentials { token: env - .monitor_hetzner_token + .komodo_hetzner_token .unwrap_or(config.hetzner.token), }, mongo: MongoConfig { - uri: env.monitor_mongo_uri.or(config.mongo.uri), - address: env.monitor_mongo_address.or(config.mongo.address), + uri: env.komodo_mongo_uri.or(config.mongo.uri), + address: env.komodo_mongo_address.or(config.mongo.address), username: env - .monitor_mongo_username + .komodo_mongo_username .or(config.mongo.username), password: env - .monitor_mongo_password + .komodo_mongo_password .or(config.mongo.password), app_name: env - .monitor_mongo_app_name + .komodo_mongo_app_name .unwrap_or(config.mongo.app_name), db_name: env - .monitor_mongo_db_name + .komodo_mongo_db_name .unwrap_or(config.mongo.db_name), }, logging: LogConfig { level: env - .monitor_logging_level + .komodo_logging_level .unwrap_or(config.logging.level), stdio: env - .monitor_logging_stdio + .komodo_logging_stdio .unwrap_or(config.logging.stdio), otlp_endpoint: env - .monitor_logging_otlp_endpoint + .komodo_logging_otlp_endpoint .or(config.logging.otlp_endpoint), opentelemetry_service_name: env - .monitor_logging_opentelemetry_service_name + .komodo_logging_opentelemetry_service_name .unwrap_or(config.logging.opentelemetry_service_name), }, diff --git a/bin/core/src/db.rs b/bin/core/src/db.rs index 93abfd540..34168f4dc 100644 --- a/bin/core/src/db.rs +++ b/bin/core/src/db.rs @@ -1,5 +1,5 @@ use mongo_indexed::{create_index, create_unique_index}; -use monitor_client::entities::{ +use komodo_client::entities::{ alert::Alert, alerter::Alerter, api_key::ApiKey, @@ -11,9 +11,10 @@ use monitor_client::entities::{ procedure::Procedure, provider::{DockerRegistryAccount, GitProviderAccount}, repo::Repo, - server::{stats::SystemStatsRecord, Server}, + server::Server, server_template::ServerTemplate, stack::Stack, + stats::SystemStatsRecord, sync::ResourceSync, tag::Tag, update::Update, diff --git a/bin/core/src/helpers/action_state.rs b/bin/core/src/helpers/action_state.rs index 2d4ae3148..e27afb8ef 100644 --- a/bin/core/src/helpers/action_state.rs +++ b/bin/core/src/helpers/action_state.rs @@ -1,7 +1,7 @@ use std::sync::{Arc, Mutex}; use anyhow::anyhow; -use monitor_client::{ +use komodo_client::{ busy::Busy, entities::{ build::BuildActionState, deployment::DeploymentActionState, diff --git a/bin/core/src/helpers/alert.rs b/bin/core/src/helpers/alert.rs index 3f232c95f..4e52758dc 100644 --- a/bin/core/src/helpers/alert.rs +++ b/bin/core/src/helpers/alert.rs @@ -1,13 +1,12 @@ use anyhow::{anyhow, Context}; use derive_variants::ExtractVariant; use futures::future::join_all; -use monitor_client::entities::{ - alert::{Alert, AlertData}, +use komodo_client::entities::{ + alert::{Alert, AlertData, SeverityLevel}, alerter::*, deployment::DeploymentState, - server::stats::SeverityLevel, stack::StackState, - update::ResourceTargetVariant, + ResourceTargetVariant, }; use mungos::{find::find_collect, mongodb::bson::doc}; use slack::types::Block; diff --git a/bin/core/src/helpers/build.rs b/bin/core/src/helpers/build.rs index 
e52cea32b..cae7a8080 100644 --- a/bin/core/src/helpers/build.rs +++ b/bin/core/src/helpers/build.rs @@ -1,5 +1,5 @@ use async_timing_util::{wait_until_timelength, Timelength}; -use monitor_client::{ +use komodo_client::{ api::write::RefreshBuildCache, entities::user::build_user, }; use mungos::find::find_collect; diff --git a/bin/core/src/helpers/builder.rs b/bin/core/src/helpers/builder.rs index 8543b577b..fc28c5d58 100644 --- a/bin/core/src/helpers/builder.rs +++ b/bin/core/src/helpers/builder.rs @@ -2,9 +2,9 @@ use std::time::Duration; use anyhow::{anyhow, Context}; use formatting::muted; -use monitor_client::entities::{ +use komodo_client::entities::{ builder::{AwsBuilderConfig, Builder, BuilderConfig}, - monitor_timestamp, + komodo_timestamp, server::Server, server_template::aws::AwsServerTemplateConfig, update::{Log, Update}, @@ -68,7 +68,7 @@ async fn get_aws_builder( config: AwsBuilderConfig, update: &mut Update, ) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> { - let start_create_ts = monitor_timestamp(); + let start_create_ts = komodo_timestamp(); let version = version.map(|v| format!("-v{v}")).unwrap_or_default(); let instance_name = format!("BUILDER-{resource_name}{version}"); @@ -85,7 +85,7 @@ async fn get_aws_builder( success: true, stdout: start_aws_builder_log(&instance_id, &ip, &config), start_ts: start_create_ts, - end_ts: monitor_timestamp(), + end_ts: komodo_timestamp(), ..Default::default() }; @@ -97,7 +97,7 @@ async fn get_aws_builder( let periphery = PeripheryClient::new(&periphery_address, &core_config().passkey); - let start_connect_ts = monitor_timestamp(); + let start_connect_ts = komodo_timestamp(); let mut res = Ok(GetVersionResponse { version: String::new(), }); @@ -115,7 +115,7 @@ async fn get_aws_builder( version ), start_ts: start_connect_ts, - end_ts: monitor_timestamp(), + end_ts: komodo_timestamp(), ..Default::default() }; update.logs.push(connect_log); diff --git a/bin/core/src/helpers/cache.rs b/bin/core/src/helpers/cache.rs index 735239a32..01416c692 100644 --- a/bin/core/src/helpers/cache.rs +++ b/bin/core/src/helpers/cache.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, hash::Hash}; -use monitor_client::busy::Busy; +use komodo_client::busy::Busy; use tokio::sync::RwLock; #[derive(Default)] diff --git a/bin/core/src/helpers/channel.rs b/bin/core/src/helpers/channel.rs index e7e7b8fa0..3e591bba6 100644 --- a/bin/core/src/helpers/channel.rs +++ b/bin/core/src/helpers/channel.rs @@ -1,6 +1,6 @@ use std::sync::OnceLock; -use monitor_client::entities::update::{Update, UpdateListItem}; +use komodo_client::entities::update::{Update, UpdateListItem}; use tokio::sync::{broadcast, Mutex}; /// A channel sending (build_id, update_id) diff --git a/bin/core/src/helpers/interpolate.rs b/bin/core/src/helpers/interpolate.rs new file mode 100644 index 000000000..beed62cf6 --- /dev/null +++ b/bin/core/src/helpers/interpolate.rs @@ -0,0 +1,222 @@ +use std::collections::HashSet; + +use anyhow::Context; +use komodo_client::entities::{ + update::Update, EnvironmentVar, SystemCommand, +}; + +use super::query::VariablesAndSecrets; + +pub fn interpolate_variables_secrets_into_environment( + VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets, + environment: &mut Vec, + global_replacers: &mut HashSet<(String, String)>, + secret_replacers: &mut HashSet<(String, String)>, +) -> anyhow::Result<()> { + for env in environment { + if env.value.is_empty() { + continue; + } + + // first pass - global variables + let (res, more_replacers) = 
svi::interpolate_variables( + &env.value, + variables, + svi::Interpolator::DoubleBrackets, + false, + ) + .with_context(|| { + format!( + "failed to interpolate global variables into env var '{}'", + env.variable + ) + })?; + global_replacers.extend(more_replacers); + + // second pass - core secrets + let (res, more_replacers) = svi::interpolate_variables( + &res, + secrets, + svi::Interpolator::DoubleBrackets, + false, + ) + .with_context(|| { + format!( + "failed to interpolate core secrets into env var '{}'", + env.variable + ) + })?; + secret_replacers.extend(more_replacers); + + // set env value with the result + env.value = res; + } + + Ok(()) +} + +pub fn interpolate_variables_secrets_into_extra_args( + VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets, + extra_args: &mut Vec, + global_replacers: &mut HashSet<(String, String)>, + secret_replacers: &mut HashSet<(String, String)>, +) -> anyhow::Result<()> { + for arg in extra_args { + if arg.is_empty() { + continue; + } + + // first pass - global variables + let (res, more_replacers) = svi::interpolate_variables( + arg, + variables, + svi::Interpolator::DoubleBrackets, + false, + ) + .with_context(|| { + format!( + "failed to interpolate global variables into extra arg '{arg}'", + ) + })?; + global_replacers.extend(more_replacers); + + // second pass - core secrets + let (res, more_replacers) = svi::interpolate_variables( + &res, + secrets, + svi::Interpolator::DoubleBrackets, + false, + ) + .with_context(|| { + format!( + "failed to interpolate core secrets into extra arg '{arg}'", + ) + })?; + secret_replacers.extend(more_replacers); + + // set arg with the result + *arg = res; + } + + Ok(()) +} + +pub fn interpolate_variables_secrets_into_container_command( + VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets, + command: &mut String, + global_replacers: &mut HashSet<(String, String)>, + secret_replacers: &mut HashSet<(String, String)>, +) -> anyhow::Result<()> { + if command.is_empty() { + return Ok(()); + } + + // first pass - global variables + let (res, more_replacers) = svi::interpolate_variables( + command, + variables, + svi::Interpolator::DoubleBrackets, + false, + ) + .with_context(|| { + format!( + "failed to interpolate global variables into command '{command}'", + ) + })?; + global_replacers.extend(more_replacers); + + // second pass - core secrets + let (res, more_replacers) = svi::interpolate_variables( + &res, + secrets, + svi::Interpolator::DoubleBrackets, + false, + ) + .with_context(|| { + format!( + "failed to interpolate core secrets into command '{command}'", + ) + })?; + secret_replacers.extend(more_replacers); + + // set command with the result + *command = res; + + Ok(()) +} + +pub fn interpolate_variables_secrets_into_system_command( + VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets, + command: &mut SystemCommand, + global_replacers: &mut HashSet<(String, String)>, + secret_replacers: &mut HashSet<(String, String)>, +) -> anyhow::Result<()> { + if command.command.is_empty() { + return Ok(()); + } + + // first pass - global variables + let (res, more_replacers) = svi::interpolate_variables( + &command.command, + variables, + svi::Interpolator::DoubleBrackets, + false, + ) + .with_context(|| { + format!( + "failed to interpolate global variables into command '{}'", + command.command + ) + })?; + global_replacers.extend(more_replacers); + + // second pass - core secrets + let (res, more_replacers) = svi::interpolate_variables( + &res, + secrets, + 
svi::Interpolator::DoubleBrackets, + false, + ) + .with_context(|| { + format!( + "failed to interpolate core secrets into command '{}'", + command.command + ) + })?; + secret_replacers.extend(more_replacers); + + // set command with the result + command.command = res; + + Ok(()) +} + +pub fn add_interp_update_log( + update: &mut Update, + global_replacers: &HashSet<(String, String)>, + secret_replacers: &HashSet<(String, String)>, +) { + // Show which variables were interpolated + if !global_replacers.is_empty() { + update.push_simple_log( + "interpolate global variables", + global_replacers + .iter() + .map(|(value, variable)| format!("{variable} => {value}")) + .collect::>() + .join("\n"), + ); + } + + // Only show names of interpolated secrets + if !secret_replacers.is_empty() { + update.push_simple_log( + "interpolate core secrets", + secret_replacers + .iter() + .map(|(_, variable)| format!("replaced: {variable}")) + .collect::>() + .join("\n"), + ); + } +} diff --git a/bin/core/src/helpers/mod.rs b/bin/core/src/helpers/mod.rs index 60189beb1..3d4c756d8 100644 --- a/bin/core/src/helpers/mod.rs +++ b/bin/core/src/helpers/mod.rs @@ -1,18 +1,18 @@ -use std::{collections::HashSet, str::FromStr, time::Duration}; +use std::{str::FromStr, time::Duration}; use anyhow::{anyhow, Context}; use futures::future::join_all; use mongo_indexed::Document; -use monitor_client::{ +use komodo_client::{ api::write::CreateServer, entities::{ - monitor_timestamp, + komodo_timestamp, permission::{Permission, PermissionLevel, UserTarget}, server::{PartialServerConfig, Server}, sync::ResourceSync, - update::{Log, ResourceTarget, Update}, + update::Log, user::{system_user, User}, - EnvironmentVar, + ResourceTarget, }, }; use mungos::{ @@ -20,7 +20,6 @@ use mungos::{ mongodb::bson::{doc, oid::ObjectId, to_document, Bson}, }; use periphery_client::PeripheryClient; -use query::get_global_variables; use rand::{distributions::Alphanumeric, thread_rng, Rng}; use resolver_api::Resolve; @@ -36,6 +35,7 @@ pub mod build; pub mod builder; pub mod cache; pub mod channel; +pub mod interpolate; pub mod procedure; pub mod prune; pub mod query; @@ -227,73 +227,6 @@ pub fn flatten_document(doc: Document) -> Document { target } -/// Returns the secret replacers -pub async fn interpolate_variables_secrets_into_environment( - environment: &mut Vec, - update: &mut Update, -) -> anyhow::Result> { - // Interpolate variables into environment - let variables = get_global_variables().await?; - let core_config = core_config(); - - let mut global_replacers = HashSet::new(); - let mut secret_replacers = HashSet::new(); - - for env in environment { - // first pass - global variables - let (res, more_replacers) = svi::interpolate_variables( - &env.value, - &variables, - svi::Interpolator::DoubleBrackets, - false, - ) - .with_context(|| { - format!( - "failed to interpolate global variables - {}", - env.variable - ) - })?; - global_replacers.extend(more_replacers); - // second pass - core secrets - let (res, more_replacers) = svi::interpolate_variables( - &res, - &core_config.secrets, - svi::Interpolator::DoubleBrackets, - false, - ) - .context("failed to interpolate core secrets")?; - secret_replacers.extend(more_replacers); - - // set env value with the result - env.value = res; - } - - // Show which variables were interpolated - if !global_replacers.is_empty() { - update.push_simple_log( - "interpolate global variables", - global_replacers - .into_iter() - .map(|(value, variable)| format!("{variable} => {value}")) - .collect::>() - 
.join("\n"), - ); - } - - if !secret_replacers.is_empty() { - update.push_simple_log( - "interpolate core secrets", - secret_replacers - .iter() - .map(|(_, variable)| format!("replaced: {variable}")) - .collect::>() - .join("\n"), - ); - } - - Ok(secret_replacers) -} - pub async fn startup_cleanup() { tokio::join!( startup_in_progress_update_cleanup(), @@ -304,8 +237,8 @@ pub async fn startup_cleanup() { /// Run on startup, as no updates should be in progress on startup async fn startup_in_progress_update_cleanup() { let log = Log::error( - "monitor shutdown", - String::from("Monitor shutdown during execution. If this is a build, the builder may not have been terminated.") + "Komodo shutdown", + String::from("Komodo shutdown during execution. If this is a build, the builder may not have been terminated.") ); // This static log won't fail to serialize, unwrap ok. let log = to_document(&log).unwrap(); @@ -373,7 +306,7 @@ async fn startup_open_alert_cleanup() { doc! { "_id": { "$in": to_update_ids } }, doc! { "$set": { "resolved": true, - "resolved_ts": monitor_timestamp() + "resolved_ts": komodo_timestamp() } }, ) .await @@ -402,10 +335,11 @@ pub async fn ensure_server() { if server.is_some() { return; } + if let Err(e) = State .resolve( CreateServer { - name: String::from("default"), + name: format!("server-{}", random_string(5)), config: PartialServerConfig { address: Some(ensure_server.to_string()), enabled: Some(true), diff --git a/bin/core/src/helpers/procedure.rs b/bin/core/src/helpers/procedure.rs index 10f4af115..338562340 100644 --- a/bin/core/src/helpers/procedure.rs +++ b/bin/core/src/helpers/procedure.rs @@ -3,7 +3,7 @@ use std::time::{Duration, Instant}; use anyhow::{anyhow, Context}; use formatting::{bold, colored, format_serror, muted, Color}; use futures::future::join_all; -use monitor_client::{ +use komodo_client::{ api::execute::Execution, entities::{ procedure::Procedure, @@ -193,6 +193,166 @@ async fn execute_execution( ) .await? } + Execution::StartDeployment(req) => { + let req = ExecuteRequest::StartDeployment(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::StartDeployment(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at StartDeployment"), + &update_id, + ) + .await? + } + Execution::RestartDeployment(req) => { + let req = ExecuteRequest::RestartDeployment(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::RestartDeployment(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at RestartDeployment"), + &update_id, + ) + .await? + } + Execution::PauseDeployment(req) => { + let req = ExecuteRequest::PauseDeployment(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::PauseDeployment(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at PauseDeployment"), + &update_id, + ) + .await? 
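// Note on the deployment arms added in this hunk: each Execution variant is wrapped back
// into its ExecuteRequest counterpart so init_execution_update can record the Operation and
// resource target, then unwrapped again with let-else. The unreachable!() is safe because
// the variant was constructed on the line above, and handle_resolve_result uses the saved
// update_id to attach any failure to that update.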
+ } + Execution::UnpauseDeployment(req) => { + let req = ExecuteRequest::UnpauseDeployment(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::UnpauseDeployment(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at UnpauseDeployment"), + &update_id, + ) + .await? + } + Execution::StopDeployment(req) => { + let req = ExecuteRequest::StopDeployment(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::StopDeployment(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at StopDeployment"), + &update_id, + ) + .await? + } + Execution::DestroyDeployment(req) => { + let req = ExecuteRequest::DestroyDeployment(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::DestroyDeployment(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at RemoveDeployment"), + &update_id, + ) + .await? + } + Execution::CloneRepo(req) => { + let req = ExecuteRequest::CloneRepo(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::CloneRepo(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at CloneRepo"), + &update_id, + ) + .await? + } + Execution::PullRepo(req) => { + let req = ExecuteRequest::PullRepo(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::PullRepo(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at PullRepo"), + &update_id, + ) + .await? + } + Execution::BuildRepo(req) => { + let req = ExecuteRequest::BuildRepo(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::BuildRepo(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at BuildRepo"), + &update_id, + ) + .await? + } + Execution::CancelRepoBuild(req) => { + let req = ExecuteRequest::CancelRepoBuild(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::CancelRepoBuild(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at CancelRepoBuild"), + &update_id, + ) + .await? + } Execution::StartContainer(req) => { let req = ExecuteRequest::StartContainer(req); let update = init_execution_update(&req, &user).await?; @@ -273,6 +433,86 @@ async fn execute_execution( ) .await? } + Execution::DestroyContainer(req) => { + let req = ExecuteRequest::DestroyContainer(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::DestroyContainer(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at RemoveContainer"), + &update_id, + ) + .await? 
+ } + Execution::StartAllContainers(req) => { + let req = ExecuteRequest::StartAllContainers(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::StartAllContainers(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at StartAllContainers"), + &update_id, + ) + .await? + } + Execution::RestartAllContainers(req) => { + let req = ExecuteRequest::RestartAllContainers(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::RestartAllContainers(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at RestartAllContainers"), + &update_id, + ) + .await? + } + Execution::PauseAllContainers(req) => { + let req = ExecuteRequest::PauseAllContainers(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::PauseAllContainers(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at PauseAllContainers"), + &update_id, + ) + .await? + } + Execution::UnpauseAllContainers(req) => { + let req = ExecuteRequest::UnpauseAllContainers(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::UnpauseAllContainers(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at UnpauseAllContainers"), + &update_id, + ) + .await? + } Execution::StopAllContainers(req) => { let req = ExecuteRequest::StopAllContainers(req); let update = init_execution_update(&req, &user).await?; @@ -289,10 +529,10 @@ async fn execute_execution( ) .await? } - Execution::RemoveContainer(req) => { - let req = ExecuteRequest::RemoveContainer(req); + Execution::PruneContainers(req) => { + let req = ExecuteRequest::PruneContainers(req); let update = init_execution_update(&req, &user).await?; - let ExecuteRequest::RemoveContainer(req) = req else { + let ExecuteRequest::PruneContainers(req) = req else { unreachable!() }; let update_id = update.id.clone(); @@ -300,15 +540,15 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at RemoveContainer"), + .context("failed at PruneContainers"), &update_id, ) .await? } - Execution::CloneRepo(req) => { - let req = ExecuteRequest::CloneRepo(req); + Execution::DeleteNetwork(req) => { + let req = ExecuteRequest::DeleteNetwork(req); let update = init_execution_update(&req, &user).await?; - let ExecuteRequest::CloneRepo(req) = req else { + let ExecuteRequest::DeleteNetwork(req) = req else { unreachable!() }; let update_id = update.id.clone(); @@ -316,55 +556,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at CloneRepo"), - &update_id, - ) - .await? - } - Execution::PullRepo(req) => { - let req = ExecuteRequest::PullRepo(req); - let update = init_execution_update(&req, &user).await?; - let ExecuteRequest::PullRepo(req) = req else { - unreachable!() - }; - let update_id = update.id.clone(); - handle_resolve_result( - State - .resolve(req, (user, update)) - .await - .context("failed at PullRepo"), - &update_id, - ) - .await? 
- } - Execution::BuildRepo(req) => { - let req = ExecuteRequest::BuildRepo(req); - let update = init_execution_update(&req, &user).await?; - let ExecuteRequest::BuildRepo(req) = req else { - unreachable!() - }; - let update_id = update.id.clone(); - handle_resolve_result( - State - .resolve(req, (user, update)) - .await - .context("failed at BuildRepo"), - &update_id, - ) - .await? - } - Execution::CancelRepoBuild(req) => { - let req = ExecuteRequest::CancelRepoBuild(req); - let update = init_execution_update(&req, &user).await?; - let ExecuteRequest::CancelRepoBuild(req) = req else { - unreachable!() - }; - let update_id = update.id.clone(); - handle_resolve_result( - State - .resolve(req, (user, update)) - .await - .context("failed at CancelRepoBuild"), + .context("failed at DeleteNetwork"), &update_id, ) .await? @@ -385,6 +577,22 @@ async fn execute_execution( ) .await? } + Execution::DeleteImage(req) => { + let req = ExecuteRequest::DeleteImage(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::DeleteImage(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at DeleteImage"), + &update_id, + ) + .await? + } Execution::PruneImages(req) => { let req = ExecuteRequest::PruneImages(req); let update = init_execution_update(&req, &user).await?; @@ -401,10 +609,10 @@ async fn execute_execution( ) .await? } - Execution::PruneContainers(req) => { - let req = ExecuteRequest::PruneContainers(req); + Execution::DeleteVolume(req) => { + let req = ExecuteRequest::DeleteVolume(req); let update = init_execution_update(&req, &user).await?; - let ExecuteRequest::PruneContainers(req) = req else { + let ExecuteRequest::DeleteVolume(req) = req else { unreachable!() }; let update_id = update.id.clone(); @@ -412,7 +620,39 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at PruneContainers"), + .context("failed at DeleteVolume"), + &update_id, + ) + .await? + } + Execution::PruneVolumes(req) => { + let req = ExecuteRequest::PruneVolumes(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::PruneVolumes(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at PruneVolumes"), + &update_id, + ) + .await? + } + Execution::PruneSystem(req) => { + let req = ExecuteRequest::PruneSystem(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::PruneSystem(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at PruneSystem"), &update_id, ) .await? 
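Alongside these execution arms, this patch introduces a two-pass interpolation scheme: helpers/interpolate.rs above together with get_variables_and_secrets in helpers/query.rs below. A minimal sketch of how a caller might wire those pieces together, using the extra-args helper whose shape appears above; the wrapper function, its imports, and the log-scrubbing remark are illustrative assumptions rather than code from this patch:

    use std::collections::HashSet;

    use komodo_client::entities::update::Update;

    use crate::helpers::{
      interpolate::{
        add_interp_update_log,
        interpolate_variables_secrets_into_extra_args,
      },
      query::get_variables_and_secrets,
    };

    /// Illustrative caller, not part of the patch.
    async fn interpolate_extra_args_example(
      extra_args: &mut Vec<String>,
      update: &mut Update,
    ) -> anyhow::Result<HashSet<(String, String)>> {
      // One read covers both maps: the non-secret variables, plus the core
      // config secrets extended with any variables flagged is_secret.
      let vars_and_secrets = get_variables_and_secrets().await?;

      let mut global_replacers = HashSet::new();
      let mut secret_replacers = HashSet::new();

      // First pass substitutes from the variables map, second pass from the
      // secrets map (svi's DoubleBrackets interpolator); both passes collect
      // (value, name) replacer pairs.
      interpolate_variables_secrets_into_extra_args(
        &vars_and_secrets,
        extra_args,
        &mut global_replacers,
        &mut secret_replacers,
      )?;

      // Variables are logged as `name => value`; secrets only as `replaced: name`.
      add_interp_update_log(update, &global_replacers, &secret_replacers);

      // Returned so secret values can presumably be scrubbed from logs downstream.
      Ok(secret_replacers)
    }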
diff --git a/bin/core/src/helpers/prune.rs b/bin/core/src/helpers/prune.rs index 6df7b32f1..669f08ab5 100644 --- a/bin/core/src/helpers/prune.rs +++ b/bin/core/src/helpers/prune.rs @@ -4,7 +4,7 @@ use async_timing_util::{ }; use futures::future::join_all; use mungos::{find::find_collect, mongodb::bson::doc}; -use periphery_client::api::build::PruneImages; +use periphery_client::api::image::PruneImages; use crate::{config::core_config, state::db_client}; diff --git a/bin/core/src/helpers/query.rs b/bin/core/src/helpers/query.rs index 68e949436..90f10fe1f 100644 --- a/bin/core/src/helpers/query.rs +++ b/bin/core/src/helpers/query.rs @@ -1,11 +1,12 @@ use std::{collections::HashMap, str::FromStr}; use anyhow::{anyhow, Context}; -use monitor_client::entities::{ +use komodo_client::entities::{ alerter::Alerter, build::Build, builder::Builder, - deployment::{ContainerSummary, Deployment, DeploymentState}, + deployment::{Deployment, DeploymentState}, + docker::container::{ContainerListItem, ContainerStateStatusEnum}, permission::PermissionLevel, procedure::Procedure, repo::Repo, @@ -14,11 +15,11 @@ use monitor_client::entities::{ stack::{Stack, StackServiceNames, StackState}, sync::ResourceSync, tag::Tag, - update::{ResourceTarget, ResourceTargetVariant, Update}, + update::Update, user::{admin_service_user, User}, user_group::UserGroup, variable::Variable, - Operation, + Operation, ResourceTarget, ResourceTargetVariant, }; use mungos::{ find::find_collect, @@ -29,17 +30,15 @@ use mungos::{ }; use crate::{ + config::core_config, resource::{self, get_user_permission_on_resource}, - state::db_client, + state::{db_client, deployment_status_cache, stack_status_cache}, }; -use super::stack::{ - compose_container_match_regex, - services::extract_services_from_stack, -}; +use super::stack::compose_container_match_regex; -#[instrument(level = "debug")] // user: Id or username +#[instrument(level = "debug")] pub async fn get_user(user: &str) -> anyhow::Result { if let Some(user) = admin_service_user(user) { return Ok(user); @@ -54,46 +53,40 @@ pub async fn get_user(user: &str) -> anyhow::Result { } #[instrument(level = "debug")] -pub async fn get_server_with_status( +pub async fn get_server_with_state( server_id_or_name: &str, ) -> anyhow::Result<(Server, ServerState)> { let server = resource::get::(server_id_or_name).await?; + let state = get_server_state(&server).await; + Ok((server, state)) +} + +#[instrument(level = "debug")] +pub async fn get_server_state(server: &Server) -> ServerState { if !server.config.enabled { - return Ok((server, ServerState::Disabled)); + return ServerState::Disabled; } - let status = match super::periphery_client(&server)? + // Unwrap ok: Server disabled check above + match super::periphery_client(server) + .unwrap() .request(periphery_client::api::GetHealth {}) .await { Ok(_) => ServerState::Ok, Err(_) => ServerState::NotOk, - }; - Ok((server, status)) + } } #[instrument(level = "debug")] pub async fn get_deployment_state( deployment: &Deployment, ) -> anyhow::Result { - if deployment.config.server_id.is_empty() { - return Ok(DeploymentState::NotDeployed); - } - let (server, status) = - get_server_with_status(&deployment.config.server_id).await?; - if status != ServerState::Ok { - return Ok(DeploymentState::Unknown); - } - let container = super::periphery_client(&server)? - .request(periphery_client::api::container::GetContainerList {}) - .await? 
- .into_iter() - .find(|container| container.name == deployment.name); - - let state = match container { - Some(container) => container.state, - None => DeploymentState::NotDeployed, - }; - + let state = deployment_status_cache() + .get(&deployment.id) + .await + .unwrap_or_default() + .curr + .state; Ok(state) } @@ -101,7 +94,7 @@ pub async fn get_deployment_state( pub fn get_stack_state_from_containers( ignore_services: &[String], services: &[StackServiceNames], - containers: &[ContainerSummary], + containers: &[ContainerListItem], ) -> StackState { // first filter the containers to only ones which match the service let services = services @@ -129,39 +122,39 @@ pub fn get_stack_state_from_containers( if services.len() != containers.len() { return StackState::Unhealthy; } - let running = containers - .iter() - .all(|container| container.state == DeploymentState::Running); + let running = containers.iter().all(|container| { + container.state == ContainerStateStatusEnum::Running + }); if running { return StackState::Running; } - let paused = containers - .iter() - .all(|container| container.state == DeploymentState::Paused); + let paused = containers.iter().all(|container| { + container.state == ContainerStateStatusEnum::Paused + }); if paused { return StackState::Paused; } - let stopped = containers - .iter() - .all(|container| container.state == DeploymentState::Exited); + let stopped = containers.iter().all(|container| { + container.state == ContainerStateStatusEnum::Exited + }); if stopped { return StackState::Stopped; } - let restarting = containers - .iter() - .all(|container| container.state == DeploymentState::Restarting); + let restarting = containers.iter().all(|container| { + container.state == ContainerStateStatusEnum::Restarting + }); if restarting { return StackState::Restarting; } - let dead = containers - .iter() - .all(|container| container.state == DeploymentState::Dead); + let dead = containers.iter().all(|container| { + container.state == ContainerStateStatusEnum::Dead + }); if dead { return StackState::Dead; } - let removing = containers - .iter() - .all(|container| container.state == DeploymentState::Removing); + let removing = containers.iter().all(|container| { + container.state == ContainerStateStatusEnum::Removing + }); if removing { return StackState::Removing; } @@ -175,22 +168,13 @@ pub async fn get_stack_state( if stack.config.server_id.is_empty() { return Ok(StackState::Down); } - let (server, status) = - get_server_with_status(&stack.config.server_id).await?; - if status != ServerState::Ok { - return Ok(StackState::Unknown); - } - let containers = super::periphery_client(&server)? - .request(periphery_client::api::container::GetContainerList {}) - .await?; - - let services = extract_services_from_stack(stack, false).await?; - - Ok(get_stack_state_from_containers( - &stack.config.ignore_services, - &services, - &containers, - )) + let state = stack_status_cache() + .get(&stack.id) + .await + .unwrap_or_default() + .curr + .state; + Ok(state) } #[instrument(level = "debug")] @@ -329,18 +313,6 @@ pub fn id_or_username_filter(id_or_username: &str) -> Document { } } -pub async fn get_global_variables( -) -> anyhow::Result> { - Ok( - find_collect(&db_client().await.variables, None, None) - .await - .context("failed to get all variables from db")? 
- .into_iter() - .map(|variable| (variable.name, variable.value)) - .collect(), - ) -} - pub async fn get_variable(name: &str) -> anyhow::Result { db_client() .await @@ -374,3 +346,33 @@ pub async fn get_latest_update( .await .context("failed to query db for latest update") } + +pub struct VariablesAndSecrets { + pub variables: HashMap, + pub secrets: HashMap, +} + +pub async fn get_variables_and_secrets( +) -> anyhow::Result { + let variables = + find_collect(&db_client().await.variables, None, None) + .await + .context("failed to get all variables from db")?; + let mut secrets = core_config().secrets.clone(); + + // extend secrets with secret variables + secrets.extend( + variables.iter().filter(|variable| variable.is_secret).map( + |variable| (variable.name.clone(), variable.value.clone()), + ), + ); + + // collect non secret variables + let variables = variables + .into_iter() + .filter(|variable| !variable.is_secret) + .map(|variable| (variable.name, variable.value)) + .collect(); + + Ok(VariablesAndSecrets { variables, secrets }) +} diff --git a/bin/core/src/helpers/repo.rs b/bin/core/src/helpers/repo.rs index 13b440e75..1382f8344 100644 --- a/bin/core/src/helpers/repo.rs +++ b/bin/core/src/helpers/repo.rs @@ -1,5 +1,5 @@ use async_timing_util::{wait_until_timelength, Timelength}; -use monitor_client::{ +use komodo_client::{ api::write::RefreshRepoCache, entities::user::repo_user, }; use mungos::find::find_collect; diff --git a/bin/core/src/helpers/stack/execute.rs b/bin/core/src/helpers/stack/execute.rs index 140f81095..8416b4acc 100644 --- a/bin/core/src/helpers/stack/execute.rs +++ b/bin/core/src/helpers/stack/execute.rs @@ -1,4 +1,4 @@ -use monitor_client::{ +use komodo_client::{ api::execute::*, entities::{ permission::PermissionLevel, diff --git a/bin/core/src/helpers/stack/mod.rs b/bin/core/src/helpers/stack/mod.rs index 7a8c9bba9..b37c62b53 100644 --- a/bin/core/src/helpers/stack/mod.rs +++ b/bin/core/src/helpers/stack/mod.rs @@ -1,6 +1,6 @@ use anyhow::{anyhow, Context}; use async_timing_util::{wait_until_timelength, Timelength}; -use monitor_client::{ +use komodo_client::{ api::write::RefreshStackCache, entities::{ permission::PermissionLevel, @@ -19,7 +19,7 @@ use crate::{ state::{db_client, State}, }; -use super::query::get_server_with_status; +use super::query::get_server_with_state; pub mod execute; pub mod remote; @@ -81,7 +81,7 @@ pub async fn get_stack_and_server( } let (server, status) = - get_server_with_status(&stack.config.server_id).await?; + get_server_with_state(&stack.config.server_id).await?; if block_if_server_unreachable && status != ServerState::Ok { return Err(anyhow!( "cannot send action when server is unreachable or disabled" diff --git a/bin/core/src/helpers/stack/remote.rs b/bin/core/src/helpers/stack/remote.rs index e3aed6b07..bab70b416 100644 --- a/bin/core/src/helpers/stack/remote.rs +++ b/bin/core/src/helpers/stack/remote.rs @@ -5,7 +5,7 @@ use std::{ use anyhow::{anyhow, Context}; use formatting::format_serror; -use monitor_client::entities::{ +use komodo_client::entities::{ stack::{ComposeContents, Stack}, update::Log, CloneArgs, @@ -118,6 +118,7 @@ pub async fn clone_remote_repo( &[], "", None, + &[], ) .await .context("failed to clone stack repo") diff --git a/bin/core/src/helpers/stack/services.rs b/bin/core/src/helpers/stack/services.rs index 13d161796..d1db0ddc9 100644 --- a/bin/core/src/helpers/stack/services.rs +++ b/bin/core/src/helpers/stack/services.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use monitor_client::entities::stack::{ +use 
komodo_client::entities::stack::{ ComposeContents, ComposeFile, ComposeService, Stack, StackServiceNames, }; diff --git a/bin/core/src/helpers/sync/deploy.rs b/bin/core/src/helpers/sync/deploy.rs index 60fc5c03e..e388a1519 100644 --- a/bin/core/src/helpers/sync/deploy.rs +++ b/bin/core/src/helpers/sync/deploy.rs @@ -3,7 +3,7 @@ use std::{collections::HashMap, time::Duration}; use anyhow::{anyhow, Context}; use formatting::{bold, colored, format_serror, muted, Color}; use futures::future::join_all; -use monitor_client::{ +use komodo_client::{ api::{ execute::{Deploy, DeployStack}, read::ListBuildVersions, @@ -16,8 +16,9 @@ use monitor_client::{ stack::{PartialStackConfig, Stack, StackConfig, StackState}, sync::SyncDeployUpdate, toml::ResourceToml, - update::{Log, ResourceTarget}, + update::Log, user::sync_user, + ResourceTarget, }, }; use resolver_api::Resolve; @@ -55,6 +56,9 @@ pub async fn deploy_from_cache( mut to_deploy: ToDeployCache, logs: &mut Vec, ) { + if to_deploy.is_empty() { + return; + } let mut log = format!( "{}: running executions to sync deployment / stack state", muted("INFO") @@ -409,7 +413,7 @@ fn build_cache_for_deployment<'a>( let deployed_version = status .container .as_ref() - .and_then(|c| c.image.split(':').last()) + .and_then(|c| c.image.as_ref()?.split(':').last()) .unwrap_or("0.0.0"); match build_version_cache.get(build_id) { Some(version) if deployed_version != version => { diff --git a/bin/core/src/helpers/sync/file.rs b/bin/core/src/helpers/sync/file.rs index 3ad9b13e7..35b386213 100644 --- a/bin/core/src/helpers/sync/file.rs +++ b/bin/core/src/helpers/sync/file.rs @@ -2,7 +2,7 @@ use std::{fs, path::Path}; use anyhow::{anyhow, Context}; use formatting::{colored, muted, Color}; -use monitor_client::entities::{toml::ResourcesToml, update::Log}; +use komodo_client::entities::{toml::ResourcesToml, update::Log}; use serde::de::DeserializeOwned; pub fn read_resources( diff --git a/bin/core/src/helpers/sync/mod.rs b/bin/core/src/helpers/sync/mod.rs index f64455736..6f32ef033 100644 --- a/bin/core/src/helpers/sync/mod.rs +++ b/bin/core/src/helpers/sync/mod.rs @@ -1,5 +1,5 @@ use async_timing_util::{wait_until_timelength, Timelength}; -use monitor_client::{ +use komodo_client::{ api::write::RefreshResourceSyncPending, entities::user::sync_user, }; use mungos::find::find_collect; diff --git a/bin/core/src/helpers/sync/remote.rs b/bin/core/src/helpers/sync/remote.rs index d98df1b4e..890aeb393 100644 --- a/bin/core/src/helpers/sync/remote.rs +++ b/bin/core/src/helpers/sync/remote.rs @@ -1,7 +1,7 @@ use std::fs; use anyhow::{anyhow, Context}; -use monitor_client::entities::{ +use komodo_client::entities::{ sync::ResourceSync, toml::ResourcesToml, update::Log, CloneArgs, }; @@ -66,6 +66,7 @@ pub async fn get_remote_resources( &[], "", None, + &[], ) .await .context("failed to clone resource repo")?; diff --git a/bin/core/src/helpers/sync/resource.rs b/bin/core/src/helpers/sync/resource.rs index c872cef66..16bf0955c 100644 --- a/bin/core/src/helpers/sync/resource.rs +++ b/bin/core/src/helpers/sync/resource.rs @@ -2,31 +2,21 @@ use std::collections::HashMap; use anyhow::Context; use formatting::{bold, colored, muted, Color}; -use monitor_client::{ +use komodo_client::{ api::write::{UpdateDescription, UpdateTagsOnResource}, entities::{ - self, - alerter::Alerter, - build::Build, - builder::Builder, - deployment::Deployment, - procedure::Procedure, - repo::Repo, - server::Server, - server_template::ServerTemplate, - stack::Stack, - sync::SyncUpdate, - tag::Tag, - 
toml::ResourceToml, - update::{Log, ResourceTarget}, - user::sync_user, + self, alerter::Alerter, build::Build, builder::Builder, + deployment::Deployment, procedure::Procedure, repo::Repo, + server::Server, server_template::ServerTemplate, stack::Stack, + sync::SyncUpdate, tag::Tag, toml::ResourceToml, update::Log, + user::sync_user, ResourceTarget, }, }; use mungos::find::find_collect; use partial_derive2::{Diff, FieldDiff, MaybeNone}; use resolver_api::Resolve; -use crate::{resource::MonitorResource, state::State}; +use crate::{resource::KomodoResource, state::State}; pub type ToUpdate = Vec>; pub type ToCreate = Vec>; @@ -42,7 +32,7 @@ pub struct ToUpdateItem { pub update_tags: bool, } -pub trait ResourceSync: MonitorResource + Sized { +pub trait ResourceSync: KomodoResource + Sized { fn resource_target(id: String) -> ResourceTarget; /// Apply any changes to incoming toml partial config diff --git a/bin/core/src/helpers/sync/resources.rs b/bin/core/src/helpers/sync/resources.rs index 38d7efa01..d564c4836 100644 --- a/bin/core/src/helpers/sync/resources.rs +++ b/bin/core/src/helpers/sync/resources.rs @@ -1,5 +1,5 @@ use formatting::{bold, colored, muted, Color}; -use monitor_client::{ +use komodo_client::{ api::execute::Execution, entities::{ self, @@ -12,8 +12,9 @@ use monitor_client::{ server::Server, server_template::ServerTemplate, stack::Stack, - update::{Log, ResourceTarget}, + update::Log, user::sync_user, + ResourceTarget, }, }; use partial_derive2::{MaybeNone, PartialDiff}; @@ -23,7 +24,7 @@ use crate::{ run_update_description, run_update_tags, ResourceSync, ToUpdateItem, }, - resource::MonitorResource, + resource::KomodoResource, }; use super::resource::{ @@ -266,42 +267,42 @@ impl ResourceSync for Procedure { .map(|d| d.name.clone()) .unwrap_or_default(); } - Execution::StartContainer(config) => { + Execution::StartDeployment(config) => { config.deployment = resources .deployments .get(&config.deployment) .map(|d| d.name.clone()) .unwrap_or_default(); } - Execution::RestartContainer(config) => { + Execution::RestartDeployment(config) => { config.deployment = resources .deployments .get(&config.deployment) .map(|d| d.name.clone()) .unwrap_or_default(); } - Execution::PauseContainer(config) => { + Execution::PauseDeployment(config) => { config.deployment = resources .deployments .get(&config.deployment) .map(|d| d.name.clone()) .unwrap_or_default(); } - Execution::UnpauseContainer(config) => { + Execution::UnpauseDeployment(config) => { config.deployment = resources .deployments .get(&config.deployment) .map(|d| d.name.clone()) .unwrap_or_default(); } - Execution::StopContainer(config) => { + Execution::StopDeployment(config) => { config.deployment = resources .deployments .get(&config.deployment) .map(|d| d.name.clone()) .unwrap_or_default(); } - Execution::RemoveContainer(config) => { + Execution::DestroyDeployment(config) => { config.deployment = resources .deployments .get(&config.deployment) @@ -336,6 +337,76 @@ impl ResourceSync for Procedure { .map(|d| d.name.clone()) .unwrap_or_default(); } + Execution::StartContainer(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::RestartContainer(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::PauseContainer(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + 
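// In this Procedure sync impl, each arm swaps the stored config.deployment / config.server
// value for the matching resource's name (presumably so synced toml refers to resources by
// name): the deployment-targeted executions use resources.deployments, while the server-level
// docker executions added in this patch (StartContainer through PruneSystem) use
// resources.servers, falling back to an empty string when nothing matches.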
Execution::UnpauseContainer(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::StopContainer(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::DestroyContainer(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::StartAllContainers(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::RestartAllContainers(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::PauseAllContainers(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::UnpauseAllContainers(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } Execution::StopAllContainers(config) => { config.server = resources .servers @@ -343,6 +414,20 @@ impl ResourceSync for Procedure { .map(|d| d.name.clone()) .unwrap_or_default(); } + Execution::PruneContainers(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::DeleteNetwork(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } Execution::PruneNetworks(config) => { config.server = resources .servers @@ -350,6 +435,13 @@ impl ResourceSync for Procedure { .map(|d| d.name.clone()) .unwrap_or_default(); } + Execution::DeleteImage(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } Execution::PruneImages(config) => { config.server = resources .servers @@ -357,7 +449,21 @@ impl ResourceSync for Procedure { .map(|d| d.name.clone()) .unwrap_or_default(); } - Execution::PruneContainers(config) => { + Execution::DeleteVolume(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::PruneVolumes(config) => { + config.server = resources + .servers + .get(&config.server) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::PruneSystem(config) => { config.server = resources .servers .get(&config.server) diff --git a/bin/core/src/helpers/sync/user_groups.rs b/bin/core/src/helpers/sync/user_groups.rs index 552955686..5f2f34b86 100644 --- a/bin/core/src/helpers/sync/user_groups.rs +++ b/bin/core/src/helpers/sync/user_groups.rs @@ -2,7 +2,7 @@ use std::{cmp::Ordering, collections::HashMap}; use anyhow::Context; use formatting::{bold, colored, muted, Color}; -use monitor_client::{ +use komodo_client::{ api::{ read::ListUserTargetPermissions, write::{ @@ -14,8 +14,9 @@ use monitor_client::{ permission::{PermissionLevel, UserTarget}, sync::SyncUpdate, toml::{PermissionToml, UserGroupToml}, - update::{Log, ResourceTarget, ResourceTargetVariant}, + update::Log, user::sync_user, + ResourceTarget, ResourceTargetVariant, }, }; use mungos::find::find_collect; diff --git a/bin/core/src/helpers/sync/variables.rs b/bin/core/src/helpers/sync/variables.rs index b0d8b7577..e2b3d0a96 100644 --- a/bin/core/src/helpers/sync/variables.rs +++ b/bin/core/src/helpers/sync/variables.rs 
@@ -2,10 +2,9 @@ use std::collections::HashMap; use anyhow::Context; use formatting::{bold, colored, muted, Color}; -use monitor_client::{ +use komodo_client::{ api::write::{ - CreateVariable, DeleteVariable, UpdateVariableDescription, - UpdateVariableValue, + CreateVariable, DeleteVariable, UpdateVariableDescription, UpdateVariableIsSecret, UpdateVariableValue }, entities::{ sync::SyncUpdate, update::Log, user::sync_user, @@ -21,6 +20,7 @@ pub struct ToUpdateItem { pub variable: Variable, pub update_value: bool, pub update_description: bool, + pub update_is_secret: bool, } pub async fn get_updates_for_view( @@ -57,6 +57,7 @@ pub async fn get_updates_for_view( update_value: original.value != variable.value, update_description: original.description != variable.description, + update_is_secret: original.is_secret != variable.is_secret, variable, }; if !item.update_value && !item.update_description { @@ -72,14 +73,31 @@ pub async fn get_updates_for_view( let mut lines = Vec::::new(); if item.update_value { - lines.push(format!( - "{}: 'value'\n{}: {}\n{}: {}", - muted("field"), - muted("from"), - colored(&original.value, Color::Red), - muted("to"), - colored(&item.variable.value, Color::Green) - )) + let mut log = format!("{}: 'value'\n", muted("field"),); + if item.variable.is_secret { + log.push_str(&format!( + "{}: {}\n{}: {}", + muted("from"), + colored( + original.value.replace(|_| true, "#"), + Color::Red + ), + muted("to"), + colored( + item.variable.value.replace(|_| true, "#"), + Color::Green + ) + )); + } else { + log.push_str(&format!( + "{}: {}\n{}: {}", + muted("from"), + colored(&original.value, Color::Red), + muted("to"), + colored(&item.variable.value, Color::Green) + )); + } + lines.push(log); } if item.update_description { @@ -93,6 +111,17 @@ pub async fn get_updates_for_view( )) } + if item.update_is_secret { + lines.push(format!( + "{}: 'is_secret'\n{}: {}\n{}: {}", + muted("field"), + muted("from"), + colored(original.is_secret, Color::Red), + muted("to"), + colored(item.variable.is_secret, Color::Green) + )) + } + update.log.push('\n'); update.log.push_str(&lines.join("\n-------------------\n")); } @@ -100,22 +129,40 @@ pub async fn get_updates_for_view( update.to_create += 1; if variable.description.is_empty() { update.log.push_str(&format!( - "\n\n{}: variable: {}\n{}: {}", + "\n\n{}: variable: {}", colored("CREATE", Color::Green), colored(&variable.name, Color::Green), - muted("value"), - variable.value, )); + if variable.is_secret { + update + .log + .push_str(&format!("\n{}: true", muted("is secret"),)); + } else { + update.log.push_str(&format!( + "\n{}: {}", + muted("value"), + variable.value, + )); + } } else { update.log.push_str(&format!( - "\n\n{}: variable: {}\n{}: {}\n{}: {}", + "\n\n{}: variable: {}\n{}: {}", colored("CREATE", Color::Green), colored(&variable.name, Color::Green), muted("description"), variable.description, - muted("value"), - variable.value, )); + if variable.is_secret { + update + .log + .push_str(&format!("\n{}: true", muted("is secret"),)); + } else { + update.log.push_str(&format!( + "\n{}: {}", + muted("value"), + variable.value, + )); + } } } } @@ -166,9 +213,13 @@ pub async fn get_updates_for_execution( update_value: original.value != variable.value, update_description: original.description != variable.description, + update_is_secret: original.is_secret != variable.is_secret, variable, }; - if !item.update_value && !item.update_description { + if !item.update_value + && !item.update_description + && !item.update_is_secret + { 
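// Skip only when nothing changed: with the new update_is_secret flag, flipping a variable's
// secret status alone still counts as an update. In the view log above, secret values are
// masked by replacing every character with '#' (value.replace(|_| true, "#")), so the real
// values never reach the sync log.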
continue; } @@ -203,6 +254,7 @@ pub async fn run_updates( name: variable.name.clone(), value: variable.value, description: variable.description, + is_secret: variable.is_secret, }, sync_user().to_owned(), ) @@ -228,6 +280,7 @@ pub async fn run_updates( variable, update_value, update_description, + update_is_secret, } in to_update { if update_value { @@ -282,6 +335,32 @@ pub async fn run_updates( )) }; } + if update_is_secret { + if let Err(e) = State + .resolve( + UpdateVariableIsSecret { + name: variable.name.clone(), + is_secret: variable.is_secret, + }, + sync_user().to_owned(), + ) + .await + { + has_error = true; + log.push_str(&format!( + "\n{}: failed to update variable is secret for '{}' | {e:#}", + colored("ERROR", Color::Red), + bold(&variable.name) + )) + } else { + log.push_str(&format!( + "\n{}: {} variable '{}' is secret", + muted("INFO"), + colored("updated", Color::Blue), + bold(&variable.name) + )) + }; + } } for variable in to_delete { diff --git a/bin/core/src/helpers/update.rs b/bin/core/src/helpers/update.rs index f10182fb2..ff66a3dd1 100644 --- a/bin/core/src/helpers/update.rs +++ b/bin/core/src/helpers/update.rs @@ -1,17 +1,17 @@ use anyhow::Context; -use monitor_client::entities::{ +use komodo_client::entities::{ build::Build, deployment::Deployment, - monitor_timestamp, + komodo_timestamp, procedure::Procedure, repo::Repo, server::Server, server_template::ServerTemplate, stack::Stack, sync::ResourceSync, - update::{ResourceTarget, Update, UpdateListItem}, + update::{Update, UpdateListItem}, user::User, - Operation, + Operation, ResourceTarget, }; use mungos::{ by_id::{find_one_by_id, update_one_by_id}, @@ -30,7 +30,7 @@ pub fn make_update( user: &User, ) -> Update { Update { - start_ts: monitor_timestamp(), + start_ts: komodo_timestamp(), target: target.into(), operation, operator: user.id.clone(), @@ -128,6 +128,66 @@ pub async fn init_execution_update( ) -> anyhow::Result { let (operation, target) = match &request { // Server + ExecuteRequest::StartContainer(data) => ( + Operation::StartContainer, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::RestartContainer(data) => ( + Operation::RestartContainer, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::PauseContainer(data) => ( + Operation::PauseContainer, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::UnpauseContainer(data) => ( + Operation::UnpauseContainer, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::StopContainer(data) => ( + Operation::StopContainer, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::DestroyContainer(data) => ( + Operation::DestroyContainer, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::StartAllContainers(data) => ( + Operation::StartAllContainers, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::RestartAllContainers(data) => ( + Operation::RestartAllContainers, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::PauseAllContainers(data) => ( + Operation::PauseAllContainers, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::UnpauseAllContainers(data) => ( + Operation::UnpauseAllContainers, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + 
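// init_execution_update pairs every ExecuteRequest with an (Operation, ResourceTarget)
// before the execution runs: the server-level container/docker requests above resolve
// data.server to a Server id, and the renamed deployment requests below (StartDeployment,
// DestroyDeployment, ...) resolve data.deployment to a Deployment id, so the resulting
// Update is attached to the right resource.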
), ExecuteRequest::StopAllContainers(data) => ( Operation::StopAllContainers, ResourceTarget::Server( @@ -135,7 +195,25 @@ pub async fn init_execution_update( ), ), ExecuteRequest::PruneContainers(data) => ( - Operation::PruneImages, + Operation::PruneContainers, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::DeleteNetwork(data) => ( + Operation::DeleteNetwork, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::PruneNetworks(data) => ( + Operation::PruneNetworks, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::DeleteImage(data) => ( + Operation::DeleteImage, ResourceTarget::Server( resource::get::(&data.server).await?.id, ), @@ -146,8 +224,20 @@ pub async fn init_execution_update( resource::get::(&data.server).await?.id, ), ), - ExecuteRequest::PruneNetworks(data) => ( - Operation::PruneNetworks, + ExecuteRequest::DeleteVolume(data) => ( + Operation::DeleteVolume, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::PruneVolumes(data) => ( + Operation::PruneVolumes, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), + ExecuteRequest::PruneSystem(data) => ( + Operation::PruneSystem, ResourceTarget::Server( resource::get::(&data.server).await?.id, ), @@ -160,38 +250,38 @@ pub async fn init_execution_update( resource::get::(&data.deployment).await?.id, ), ), - ExecuteRequest::StartContainer(data) => ( - Operation::StartContainer, + ExecuteRequest::StartDeployment(data) => ( + Operation::StartDeployment, ResourceTarget::Deployment( resource::get::(&data.deployment).await?.id, ), ), - ExecuteRequest::RestartContainer(data) => ( - Operation::RestartContainer, + ExecuteRequest::RestartDeployment(data) => ( + Operation::RestartDeployment, ResourceTarget::Deployment( resource::get::(&data.deployment).await?.id, ), ), - ExecuteRequest::PauseContainer(data) => ( - Operation::PauseContainer, + ExecuteRequest::PauseDeployment(data) => ( + Operation::PauseDeployment, ResourceTarget::Deployment( resource::get::(&data.deployment).await?.id, ), ), - ExecuteRequest::UnpauseContainer(data) => ( - Operation::UnpauseContainer, + ExecuteRequest::UnpauseDeployment(data) => ( + Operation::UnpauseDeployment, ResourceTarget::Deployment( resource::get::(&data.deployment).await?.id, ), ), - ExecuteRequest::StopContainer(data) => ( - Operation::StopContainer, + ExecuteRequest::StopDeployment(data) => ( + Operation::StopDeployment, ResourceTarget::Deployment( resource::get::(&data.deployment).await?.id, ), ), - ExecuteRequest::RemoveContainer(data) => ( - Operation::RemoveContainer, + ExecuteRequest::DestroyDeployment(data) => ( + Operation::DestroyDeployment, ResourceTarget::Deployment( resource::get::(&data.deployment).await?.id, ), diff --git a/bin/core/src/listener/github/build.rs b/bin/core/src/listener/github/build.rs index 702a5e19e..82f998cb3 100644 --- a/bin/core/src/listener/github/build.rs +++ b/bin/core/src/listener/github/build.rs @@ -2,7 +2,7 @@ use std::sync::OnceLock; use anyhow::anyhow; use axum::http::HeaderMap; -use monitor_client::{ +use komodo_client::{ api::execute::RunBuild, entities::{build::Build, user::git_webhook_user}, }; diff --git a/bin/core/src/listener/github/mod.rs b/bin/core/src/listener/github/mod.rs index f6099260a..428ded0c8 100644 --- a/bin/core/src/listener/github/mod.rs +++ b/bin/core/src/listener/github/mod.rs @@ -30,7 +30,7 @@ struct Id { #[derive(Deserialize)] 
struct IdBranch { id: String, - branch: String, + branch: Option, } pub fn router() -> Router { @@ -152,7 +152,7 @@ pub fn router() -> Router { async { let res = procedure::handle_procedure_webhook( id.clone(), - branch, + branch.unwrap_or_else(|| String::from("main")), headers, body ).await; diff --git a/bin/core/src/listener/github/procedure.rs b/bin/core/src/listener/github/procedure.rs index a49c68aa6..472ed1ad4 100644 --- a/bin/core/src/listener/github/procedure.rs +++ b/bin/core/src/listener/github/procedure.rs @@ -2,7 +2,7 @@ use std::sync::OnceLock; use anyhow::anyhow; use axum::http::HeaderMap; -use monitor_client::{ +use komodo_client::{ api::execute::RunProcedure, entities::{procedure::Procedure, user::git_webhook_user}, }; diff --git a/bin/core/src/listener/github/repo.rs b/bin/core/src/listener/github/repo.rs index 2ea280507..3396739b6 100644 --- a/bin/core/src/listener/github/repo.rs +++ b/bin/core/src/listener/github/repo.rs @@ -2,7 +2,7 @@ use std::sync::OnceLock; use anyhow::anyhow; use axum::http::HeaderMap; -use monitor_client::{ +use komodo_client::{ api::execute::{BuildRepo, CloneRepo, PullRepo}, entities::{repo::Repo, user::git_webhook_user}, }; diff --git a/bin/core/src/listener/github/stack.rs b/bin/core/src/listener/github/stack.rs index c37d15d8b..92b8cc90b 100644 --- a/bin/core/src/listener/github/stack.rs +++ b/bin/core/src/listener/github/stack.rs @@ -2,7 +2,7 @@ use std::sync::OnceLock; use anyhow::anyhow; use axum::http::HeaderMap; -use monitor_client::{ +use komodo_client::{ api::{execute::DeployStack, write::RefreshStackCache}, entities::{stack::Stack, user::git_webhook_user}, }; diff --git a/bin/core/src/listener/github/sync.rs b/bin/core/src/listener/github/sync.rs index b9220b7d0..e254766e3 100644 --- a/bin/core/src/listener/github/sync.rs +++ b/bin/core/src/listener/github/sync.rs @@ -2,7 +2,7 @@ use std::sync::OnceLock; use anyhow::anyhow; use axum::http::HeaderMap; -use monitor_client::{ +use komodo_client::{ api::{execute::RunSync, write::RefreshResourceSyncPending}, entities::{sync::ResourceSync, user::git_webhook_user}, }; diff --git a/bin/core/src/main.rs b/bin/core/src/main.rs index 74329d044..ed3c11f9a 100644 --- a/bin/core/src/main.rs +++ b/bin/core/src/main.rs @@ -28,7 +28,7 @@ async fn app() -> anyhow::Result<()> { dotenvy::dotenv().ok(); let config = core_config(); logger::init(&config.logging)?; - info!("monitor core version: v{}", env!("CARGO_PKG_VERSION")); + info!("Komodo Core version: v{}", env!("CARGO_PKG_VERSION")); info!("config: {:?}", config.sanitized()); // includes init db_client check to crash on db init failure @@ -77,7 +77,7 @@ async fn app() -> anyhow::Result<()> { .await .context("failed to bind to tcp listener")?; - info!("monitor core listening on {socket_addr}"); + info!("Komodo Core listening on {socket_addr}"); axum::serve(listener, app).await.context("server crashed")?; diff --git a/bin/core/src/monitor/alert/deployment.rs b/bin/core/src/monitor/alert/deployment.rs index 6c19eb348..640a10fac 100644 --- a/bin/core/src/monitor/alert/deployment.rs +++ b/bin/core/src/monitor/alert/deployment.rs @@ -1,10 +1,9 @@ use std::collections::HashMap; -use monitor_client::entities::{ - alert::{Alert, AlertData}, +use komodo_client::entities::{ + alert::{Alert, AlertData, SeverityLevel}, deployment::{Deployment, DeploymentState}, - server::stats::SeverityLevel, - update::ResourceTarget, + ResourceTarget, }; use crate::{ diff --git a/bin/core/src/monitor/alert/mod.rs b/bin/core/src/monitor/alert/mod.rs index 9f00d0bc9..5ddd16eb1 
100644 --- a/bin/core/src/monitor/alert/mod.rs +++ b/bin/core/src/monitor/alert/mod.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use anyhow::Context; -use monitor_client::entities::{ +use komodo_client::entities::{ resource::ResourceQuery, server::{Server, ServerListItem}, user::User, diff --git a/bin/core/src/monitor/alert/server.rs b/bin/core/src/monitor/alert/server.rs index c34697ac5..e5c193fc3 100644 --- a/bin/core/src/monitor/alert/server.rs +++ b/bin/core/src/monitor/alert/server.rs @@ -3,11 +3,11 @@ use std::{collections::HashMap, path::PathBuf, str::FromStr}; use anyhow::Context; use derive_variants::ExtractVariant; use mongo_indexed::Indexed; -use monitor_client::entities::{ - alert::{Alert, AlertData, AlertDataVariant}, - monitor_timestamp, optional_string, - server::{stats::SeverityLevel, ServerListItem, ServerState}, - update::ResourceTarget, +use komodo_client::entities::{ + alert::{Alert, AlertData, AlertDataVariant, SeverityLevel}, + komodo_timestamp, optional_string, + server::{ServerListItem, ServerState}, + ResourceTarget, }; use mungos::{ bulk_update::{self, BulkUpdate}, @@ -479,7 +479,7 @@ async fn resolve_alerts(alerts: &[(Alert, SendAlerts)]) { doc! { "$set": { "resolved": true, - "resolved_ts": monitor_timestamp() + "resolved_ts": komodo_timestamp() } }, ) @@ -488,7 +488,7 @@ async fn resolve_alerts(alerts: &[(Alert, SendAlerts)]) { .inspect_err(|e| warn!("{e:#}")) .ok(); - let ts = monitor_timestamp(); + let ts = komodo_timestamp(); let closed = alerts .iter() diff --git a/bin/core/src/monitor/alert/stack.rs b/bin/core/src/monitor/alert/stack.rs index aa60edc6f..845f53f97 100644 --- a/bin/core/src/monitor/alert/stack.rs +++ b/bin/core/src/monitor/alert/stack.rs @@ -1,10 +1,9 @@ use std::collections::HashMap; -use monitor_client::entities::{ - alert::{Alert, AlertData}, - server::stats::SeverityLevel, +use komodo_client::entities::{ + alert::{Alert, AlertData, SeverityLevel}, stack::{Stack, StackState}, - update::ResourceTarget, + ResourceTarget, }; use crate::{ diff --git a/bin/core/src/monitor/helpers.rs b/bin/core/src/monitor/helpers.rs index 16ac10037..cc94d4a81 100644 --- a/bin/core/src/monitor/helpers.rs +++ b/bin/core/src/monitor/helpers.rs @@ -1,15 +1,8 @@ -use monitor_client::entities::{ - deployment::{ContainerSummary, Deployment, DeploymentState}, - repo::Repo, - server::{ - docker_image::ImageSummary, - docker_network::DockerNetwork, - stats::{ - ServerHealth, SeverityLevel, SingleDiskUsage, SystemStats, - }, - Server, ServerConfig, ServerState, - }, - stack::{ComposeProject, Stack, StackState}, +use komodo_client::entities::{ + alert::SeverityLevel, deployment::{Deployment, DeploymentState}, docker::{ + container::ContainerListItem, image::ImageListItem, + network::NetworkListItem, volume::VolumeListItem, + }, repo::Repo, server::{Server, ServerConfig, ServerHealth, ServerState}, stack::{ComposeProject, Stack, StackState}, stats::{SingleDiskUsage, SystemStats} }; use serror::Serror; @@ -89,9 +82,10 @@ pub async fn insert_stacks_status_unknown(stacks: Vec) { } type DockerLists = ( - Option>, - Option>, - Option>, + Option>, + Option>, + Option>, + Option>, Option>, ); @@ -101,7 +95,7 @@ pub async fn insert_server_status( state: ServerState, version: String, stats: Option, - (containers, networks, images, projects): DockerLists, + (containers, networks, images, volumes, projects): DockerLists, err: impl Into>, ) { let health = stats.as_ref().map(|s| get_server_health(server, s)); @@ -117,6 +111,7 @@ pub async fn insert_server_status( containers, 
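// The cached server status (and the DockerLists tuple above) now carries volumes as a
// fifth list alongside containers, networks, images, and projects.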
networks, images, + volumes, projects, err: err.into(), } diff --git a/bin/core/src/monitor/lists.rs b/bin/core/src/monitor/lists.rs index 7efc3aac3..7eace0ec0 100644 --- a/bin/core/src/monitor/lists.rs +++ b/bin/core/src/monitor/lists.rs @@ -1,52 +1,51 @@ -use anyhow::Context; -use monitor_client::entities::{ - deployment::ContainerSummary, - server::{ - docker_image::ImageSummary, docker_network::DockerNetwork, +use komodo_client::entities::{ + docker::{ + container::ContainerListItem, image::ImageListItem, + network::NetworkListItem, volume::VolumeListItem, }, stack::ComposeProject, }; use periphery_client::{ - api::{ - container::GetContainerList, GetDockerLists, - GetDockerListsResponse, - }, + api::{GetDockerLists, GetDockerListsResponse}, PeripheryClient, }; pub async fn get_docker_lists( periphery: &PeripheryClient, ) -> anyhow::Result<( - Vec, - Vec, - Vec, + Vec, + Vec, + Vec, + Vec, Vec, )> { - if let Ok(GetDockerListsResponse { + let GetDockerListsResponse { containers, networks, images, + volumes, projects, - }) = periphery.request(GetDockerLists {}).await - { - // TODO: handle the errors - let (mut containers, mut networks, images, mut projects) = ( - containers.unwrap_or_default(), - networks.unwrap_or_default(), - images.unwrap_or_default(), - projects.unwrap_or_default(), - ); - containers.sort_by(|a, b| a.name.cmp(&b.name)); - networks.sort_by(|a, b| a.name.cmp(&b.name)); - projects.sort_by(|a, b| a.name.cmp(&b.name)); - return Ok((containers, networks, images, projects)); - } - // Fallback to ListContainers for backward compat w/ v1.12 - let mut containers = - periphery - .request(GetContainerList {}) - .await - .context("failed to get docker container list")?; + } = periphery.request(GetDockerLists {}).await?; + // TODO: handle the errors + let ( + mut containers, + mut networks, + mut images, + mut volumes, + mut projects, + ) = ( + containers.unwrap_or_default(), + networks.unwrap_or_default(), + images.unwrap_or_default(), + volumes.unwrap_or_default(), + projects.unwrap_or_default(), + ); + containers.sort_by(|a, b| a.name.cmp(&b.name)); - Ok((containers, Vec::new(), Vec::new(), Vec::new())) + networks.sort_by(|a, b| a.name.cmp(&b.name)); + images.sort_by(|a, b| a.name.cmp(&b.name)); + volumes.sort_by(|a, b| a.name.cmp(&b.name)); + projects.sort_by(|a, b| a.name.cmp(&b.name)); + + Ok((containers, networks, images, volumes, projects)) } diff --git a/bin/core/src/monitor/mod.rs b/bin/core/src/monitor/mod.rs index 5709b44ca..75ad5393f 100644 --- a/bin/core/src/monitor/mod.rs +++ b/bin/core/src/monitor/mod.rs @@ -1,16 +1,16 @@ use async_timing_util::wait_until_timelength; use futures::future::join_all; use helpers::insert_stacks_status_unknown; -use monitor_client::entities::{ - deployment::{ContainerSummary, DeploymentState}, - monitor_timestamp, - server::{ - docker_image::ImageSummary, - docker_network::DockerNetwork, - stats::{ServerHealth, SystemStats}, - Server, ServerState, +use komodo_client::entities::{ + deployment::DeploymentState, + docker::{ + container::ContainerListItem, image::ImageListItem, + network::NetworkListItem, volume::VolumeListItem, }, + komodo_timestamp, + server::{Server, ServerHealth, ServerState}, stack::{ComposeProject, StackService, StackState}, + stats::SystemStats, }; use mungos::{find::find_collect, mongodb::bson::doc}; use periphery_client::api::{self, git::GetLatestCommit}; @@ -47,9 +47,10 @@ pub struct CachedServerStatus { pub version: String, pub stats: Option, pub health: Option, - pub containers: Option>, - pub networks: Option>, 
- pub images: Option>, + pub containers: Option>, + pub networks: Option>, + pub images: Option>, + pub volumes: Option>, pub projects: Option>, /// Store the error in reaching periphery pub err: Option, @@ -60,7 +61,7 @@ pub struct CachedDeploymentStatus { /// The deployment id pub id: String, pub state: DeploymentState, - pub container: Option, + pub container: Option, } #[derive(Default, Clone, Debug)] @@ -87,7 +88,7 @@ pub fn spawn_monitor_loop() { .try_into() .expect("Invalid monitoring interval"); tokio::spawn(async move { - refresh_server_cache(monitor_timestamp()).await; + refresh_server_cache(komodo_timestamp()).await; loop { let ts = (wait_until_timelength(interval, ADDITIONAL_MS).await - ADDITIONAL_MS) as i64; @@ -153,7 +154,7 @@ pub async fn update_cache_for_server(server: &Server) { ServerState::Disabled, String::from("unknown"), None, - (None, None, None, None), + (None, None, None, None, None), None, ) .await; @@ -178,7 +179,7 @@ pub async fn update_cache_for_server(server: &Server) { ServerState::NotOk, String::from("unknown"), None, - (None, None, None, None), + (None, None, None, None, None), Serror::from(&e), ) .await; @@ -198,7 +199,7 @@ pub async fn update_cache_for_server(server: &Server) { ServerState::NotOk, String::from("unknown"), None, - (None, None, None, None), + (None, None, None, None, None), Serror::from(&e), ) .await; @@ -210,7 +211,7 @@ pub async fn update_cache_for_server(server: &Server) { }; match lists::get_docker_lists(&periphery).await { - Ok((containers, networks, images, projects)) => { + Ok((containers, networks, images, volumes, projects)) => { tokio::join!( resources::update_deployment_cache(deployments, &containers), resources::update_stack_cache(stacks, &containers), @@ -224,6 +225,7 @@ pub async fn update_cache_for_server(server: &Server) { Some(containers.clone()), Some(networks), Some(images), + Some(volumes), Some(projects), ), None, @@ -241,7 +243,7 @@ pub async fn update_cache_for_server(server: &Server) { ServerState::Ok, version, stats, - (None, None, None, None), + (None, None, None, None, None), Some(e.into()), ) .await; diff --git a/bin/core/src/monitor/record.rs b/bin/core/src/monitor/record.rs index f489522cd..942be11d6 100644 --- a/bin/core/src/monitor/record.rs +++ b/bin/core/src/monitor/record.rs @@ -1,4 +1,4 @@ -use monitor_client::entities::server::stats::{ +use komodo_client::entities::stats::{ sum_disk_usage, SystemStatsRecord, TotalDiskUsage, }; diff --git a/bin/core/src/monitor/resources.rs b/bin/core/src/monitor/resources.rs index 13c103ec2..d33cf7a93 100644 --- a/bin/core/src/monitor/resources.rs +++ b/bin/core/src/monitor/resources.rs @@ -1,6 +1,7 @@ use anyhow::Context; -use monitor_client::entities::{ - deployment::{ContainerSummary, Deployment, DeploymentState}, +use komodo_client::entities::{ + deployment::{Deployment, DeploymentState}, + docker::container::ContainerListItem, stack::{Stack, StackService, StackServiceNames}, }; @@ -19,7 +20,7 @@ use super::{CachedDeploymentStatus, CachedStackStatus, History}; pub async fn update_deployment_cache( deployments: Vec, - containers: &[ContainerSummary], + containers: &[ContainerListItem], ) { let deployment_status_cache = deployment_status_cache(); for deployment in deployments { @@ -33,7 +34,7 @@ pub async fn update_deployment_cache( .map(|s| s.curr.state); let state = container .as_ref() - .map(|c| c.state) + .map(|c| c.state.into()) .unwrap_or(DeploymentState::NotDeployed); deployment_status_cache .insert( @@ -54,7 +55,7 @@ pub async fn update_deployment_cache( pub 
async fn update_stack_cache( stacks: Vec, - containers: &[ContainerSummary], + containers: &[ContainerListItem], ) { let stack_status_cache = stack_status_cache(); for stack in stacks { diff --git a/bin/core/src/resource/alerter.rs b/bin/core/src/resource/alerter.rs index 48686c70f..cd507a6e5 100644 --- a/bin/core/src/resource/alerter.rs +++ b/bin/core/src/resource/alerter.rs @@ -1,19 +1,19 @@ use derive_variants::ExtractVariant; -use monitor_client::entities::{ +use komodo_client::entities::{ alerter::{ Alerter, AlerterConfig, AlerterConfigDiff, AlerterListItem, AlerterListItemInfo, AlerterQuerySpecifics, PartialAlerterConfig, }, resource::Resource, - update::{ResourceTargetVariant, Update}, + update::Update, user::User, - Operation, + Operation, ResourceTargetVariant, }; use mungos::mongodb::Collection; use crate::state::db_client; -impl super::MonitorResource for Alerter { +impl super::KomodoResource for Alerter { type Config = AlerterConfig; type PartialConfig = PartialAlerterConfig; type ConfigDiff = AlerterConfigDiff; diff --git a/bin/core/src/resource/build.rs b/bin/core/src/resource/build.rs index afbeea2ce..b4073db2e 100644 --- a/bin/core/src/resource/build.rs +++ b/bin/core/src/resource/build.rs @@ -1,7 +1,7 @@ use std::time::Duration; use anyhow::Context; -use monitor_client::entities::{ +use komodo_client::entities::{ build::{ Build, BuildConfig, BuildConfigDiff, BuildInfo, BuildListItem, BuildListItemInfo, BuildQuerySpecifics, BuildState, @@ -10,9 +10,9 @@ use monitor_client::entities::{ builder::Builder, permission::PermissionLevel, resource::Resource, - update::{ResourceTargetVariant, Update}, + update::Update, user::User, - Operation, + Operation, ResourceTargetVariant, }; use mungos::{ find::find_collect, @@ -24,7 +24,7 @@ use crate::{ state::{action_states, build_state_cache, db_client}, }; -impl super::MonitorResource for Build { +impl super::KomodoResource for Build { type Config = BuildConfig; type PartialConfig = PartialBuildConfig; type ConfigDiff = BuildConfigDiff; diff --git a/bin/core/src/resource/builder.rs b/bin/core/src/resource/builder.rs index ad97c8ac1..e5c98d4e1 100644 --- a/bin/core/src/resource/builder.rs +++ b/bin/core/src/resource/builder.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use monitor_client::entities::{ +use komodo_client::entities::{ builder::{ Builder, BuilderConfig, BuilderConfigDiff, BuilderConfigVariant, BuilderListItem, BuilderListItemInfo, BuilderQuerySpecifics, @@ -8,9 +8,9 @@ use monitor_client::entities::{ permission::PermissionLevel, resource::Resource, server::Server, - update::{ResourceTargetVariant, Update}, + update::Update, user::User, - MergePartial, Operation, + MergePartial, Operation, ResourceTargetVariant, }; use mungos::mongodb::{ bson::{doc, to_document, Document}, @@ -19,7 +19,7 @@ use mungos::mongodb::{ use crate::state::db_client; -impl super::MonitorResource for Builder { +impl super::KomodoResource for Builder { type Config = BuilderConfig; type PartialConfig = PartialBuilderConfig; type ConfigDiff = BuilderConfigDiff; diff --git a/bin/core/src/resource/deployment.rs b/bin/core/src/resource/deployment.rs index dbc458ab8..b9bb4ddf6 100644 --- a/bin/core/src/resource/deployment.rs +++ b/bin/core/src/resource/deployment.rs @@ -1,6 +1,6 @@ use anyhow::Context; use formatting::format_serror; -use monitor_client::entities::{ +use komodo_client::entities::{ build::Build, deployment::{ Deployment, DeploymentConfig, DeploymentConfigDiff, @@ -11,9 +11,9 @@ use monitor_client::entities::{ permission::PermissionLevel, 
resource::Resource, server::Server, - update::{ResourceTargetVariant, Update}, + update::Update, user::User, - Operation, + Operation, ResourceTargetVariant, }; use mungos::mongodb::Collection; use periphery_client::api::container::RemoveContainer; @@ -30,7 +30,7 @@ use crate::{ use super::get_check_permissions; -impl super::MonitorResource for Deployment { +impl super::KomodoResource for Deployment { type Config = DeploymentConfig; type PartialConfig = PartialDeploymentConfig; type ConfigDiff = DeploymentConfigDiff; @@ -86,8 +86,12 @@ impl super::MonitorResource for Deployment { }), image: status .as_ref() - .and_then(|s| { - s.curr.container.as_ref().map(|c| c.image.clone()) + .map(|s| { + s.curr + .container + .as_ref() + .and_then(|c| c.image.clone()) + .unwrap_or_else(|| String::from("Unknown")) }) .unwrap_or(build_image), server_id: deployment.config.server_id, diff --git a/bin/core/src/resource/mod.rs b/bin/core/src/resource/mod.rs index 8c8d0a16d..da32624b7 100644 --- a/bin/core/src/resource/mod.rs +++ b/bin/core/src/resource/mod.rs @@ -6,16 +6,16 @@ use std::{ use anyhow::{anyhow, Context}; use formatting::format_serror; use futures::{future::join_all, FutureExt}; -use monitor_client::{ +use komodo_client::{ api::write::CreateTag, entities::{ - monitor_timestamp, + komodo_timestamp, permission::PermissionLevel, resource::{AddFilters, Resource, ResourceQuery}, - to_monitor_name, - update::{ResourceTarget, ResourceTargetVariant, Update}, + to_komodo_name, + update::Update, user::User, - Operation, + Operation, ResourceTarget, ResourceTargetVariant, }, }; use mungos::{ @@ -69,8 +69,8 @@ pub use sync::{ spawn_resource_sync_state_refresh_loop, }; -/// Implement on each monitor resource for common methods -pub trait MonitorResource { +/// Implement on each Komodo resource for common methods +pub trait KomodoResource { type ListItem: Serialize + Send; type Config: Clone + Default @@ -186,7 +186,7 @@ pub trait MonitorResource { // GET // ====== -pub async fn get( +pub async fn get( id_or_name: &str, ) -> anyhow::Result> { T::coll() @@ -202,7 +202,7 @@ pub async fn get( }) } -pub async fn get_check_permissions( +pub async fn get_check_permissions( id_or_name: &str, user: &User, permission_level: PermissionLevel, @@ -235,7 +235,7 @@ pub async fn get_check_permissions( /// Returns None if still no need to filter by resource id (eg transparent mode, group membership with all access). 
#[instrument(level = "debug")] -pub async fn get_resource_ids_for_user( +pub async fn get_resource_ids_for_user( user: &User, ) -> anyhow::Result>> { // Check admin or transparent mode @@ -301,7 +301,7 @@ pub async fn get_resource_ids_for_user( } #[instrument(level = "debug")] -pub async fn get_user_permission_on_resource( +pub async fn get_user_permission_on_resource( user: &User, resource_id: &str, ) -> anyhow::Result { @@ -374,7 +374,7 @@ pub async fn get_user_permission_on_resource( } #[instrument(level = "debug")] -pub async fn list_for_user( +pub async fn list_for_user( mut query: ResourceQuery, user: &User, ) -> anyhow::Result> { @@ -384,7 +384,7 @@ pub async fn list_for_user( list_for_user_using_document::(filters, user).await } -pub async fn list_for_user_using_document( +pub async fn list_for_user_using_document( filters: Document, user: &User, ) -> anyhow::Result> { @@ -396,7 +396,7 @@ pub async fn list_for_user_using_document( } #[instrument(level = "debug")] -pub async fn list_full_for_user( +pub async fn list_full_for_user( mut query: ResourceQuery, user: &User, ) -> anyhow::Result>> { @@ -407,7 +407,7 @@ pub async fn list_full_for_user( } #[instrument(level = "debug")] -async fn list_full_for_user_using_document( +pub async fn list_full_for_user_using_document( mut filters: Document, user: &User, ) -> anyhow::Result>> { @@ -428,13 +428,13 @@ async fn list_full_for_user_using_document( pub type IdResourceMap = HashMap< String, Resource< - ::Config, - ::Info, + ::Config, + ::Info, >, >; #[instrument(level = "debug")] -pub async fn get_id_to_resource_map( +pub async fn get_id_to_resource_map( ) -> anyhow::Result> { let res = find_collect(T::coll().await, None, None) .await @@ -451,25 +451,29 @@ pub async fn get_id_to_resource_map( // CREATE // ======= -pub async fn create( +pub async fn create( name: &str, mut config: T::PartialConfig, user: &User, ) -> anyhow::Result> { if !T::user_can_create(user) { return Err(anyhow!( - "User does not have permissions to create {}", + "User does not have permissions to create {}.", T::resource_type() )); } - let name = to_monitor_name(name); - - if ObjectId::from_str(&name).is_ok() { - return Err(anyhow!("valid ObjectIds cannot be used as names")); + if name.is_empty() { + return Err(anyhow!("Must provide non-empty name for resource.")); } - let start_ts = monitor_timestamp(); + let name = to_komodo_name(name); + + if ObjectId::from_str(&name).is_ok() { + return Err(anyhow!("valid ObjectIds cannot be used as names.")); + } + + let start_ts = komodo_timestamp(); T::validate_create_config(&mut config, user).await?; @@ -531,7 +535,7 @@ pub async fn create( // UPDATE // ======= -pub async fn update( +pub async fn update( id_or_name: &str, mut config: T::PartialConfig, user: &User, @@ -602,7 +606,7 @@ pub async fn update( Ok(updated) } -fn resource_target(id: String) -> ResourceTarget { +fn resource_target(id: String) -> ResourceTarget { match T::resource_type() { ResourceTargetVariant::System => ResourceTarget::System(id), ResourceTargetVariant::Build => ResourceTarget::Build(id), @@ -624,7 +628,7 @@ fn resource_target(id: String) -> ResourceTarget { } } -pub async fn update_description( +pub async fn update_description( id_or_name: &str, description: &str, user: &User, @@ -645,7 +649,7 @@ pub async fn update_description( Ok(()) } -pub async fn update_tags( +pub async fn update_tags( id_or_name: &str, tags: Vec, user: User, @@ -679,7 +683,7 @@ pub async fn update_tags( Ok(()) } -pub async fn remove_tag_from_all( +pub async fn 
remove_tag_from_all( tag_id: &str, ) -> anyhow::Result<()> { T::coll() @@ -694,7 +698,7 @@ pub async fn remove_tag_from_all( // DELETE // ======= -pub async fn delete( +pub async fn delete( id_or_name: &str, user: &User, ) -> anyhow::Result> { diff --git a/bin/core/src/resource/procedure.rs b/bin/core/src/resource/procedure.rs index 88cbd7d01..15db454f0 100644 --- a/bin/core/src/resource/procedure.rs +++ b/bin/core/src/resource/procedure.rs @@ -1,7 +1,7 @@ use std::time::Duration; use anyhow::{anyhow, Context}; -use monitor_client::{ +use komodo_client::{ api::execute::Execution, entities::{ build::Build, @@ -17,9 +17,9 @@ use monitor_client::{ server::Server, stack::Stack, sync::ResourceSync, - update::{ResourceTargetVariant, Update}, + update::Update, user::User, - Operation, + Operation, ResourceTargetVariant, }, }; use mungos::{ @@ -29,7 +29,7 @@ use mungos::{ use crate::state::{action_states, db_client, procedure_state_cache}; -impl super::MonitorResource for Procedure { +impl super::KomodoResource for Procedure { type Config = ProcedureConfig; type PartialConfig = PartialProcedureConfig; type ConfigDiff = ProcedureConfigDiff; @@ -197,7 +197,7 @@ async fn validate_config( .await?; params.deployment = deployment.id; } - Execution::StartContainer(params) => { + Execution::StartDeployment(params) => { let deployment = super::get_check_permissions::( ¶ms.deployment, @@ -207,7 +207,7 @@ async fn validate_config( .await?; params.deployment = deployment.id; } - Execution::RestartContainer(params) => { + Execution::RestartDeployment(params) => { let deployment = super::get_check_permissions::( ¶ms.deployment, @@ -217,7 +217,7 @@ async fn validate_config( .await?; params.deployment = deployment.id; } - Execution::PauseContainer(params) => { + Execution::PauseDeployment(params) => { let deployment = super::get_check_permissions::( ¶ms.deployment, @@ -227,7 +227,7 @@ async fn validate_config( .await?; params.deployment = deployment.id; } - Execution::UnpauseContainer(params) => { + Execution::UnpauseDeployment(params) => { let deployment = super::get_check_permissions::( ¶ms.deployment, @@ -237,7 +237,7 @@ async fn validate_config( .await?; params.deployment = deployment.id; } - Execution::StopContainer(params) => { + Execution::StopDeployment(params) => { let deployment = super::get_check_permissions::( ¶ms.deployment, @@ -247,16 +247,7 @@ async fn validate_config( .await?; params.deployment = deployment.id; } - Execution::StopAllContainers(params) => { - let server = super::get_check_permissions::( - ¶ms.server, - user, - PermissionLevel::Execute, - ) - .await?; - params.server = server.id; - } - Execution::RemoveContainer(params) => { + Execution::DestroyDeployment(params) => { let deployment = super::get_check_permissions::( ¶ms.deployment, @@ -302,6 +293,123 @@ async fn validate_config( .await?; params.repo = repo.id; } + Execution::StartContainer(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::RestartContainer(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::PauseContainer(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::UnpauseContainer(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + 
PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::StopContainer(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::DestroyContainer(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::StartAllContainers(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::RestartAllContainers(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::PauseAllContainers(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::UnpauseAllContainers(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::StopAllContainers(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::PruneContainers(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::DeleteNetwork(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } Execution::PruneNetworks(params) => { let server = super::get_check_permissions::( ¶ms.server, @@ -311,6 +419,15 @@ async fn validate_config( .await?; params.server = server.id; } + Execution::DeleteImage(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } Execution::PruneImages(params) => { let server = super::get_check_permissions::( ¶ms.server, @@ -320,7 +437,25 @@ async fn validate_config( .await?; params.server = server.id; } - Execution::PruneContainers(params) => { + Execution::DeleteVolume(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::PruneVolumes(params) => { + let server = super::get_check_permissions::( + ¶ms.server, + user, + PermissionLevel::Execute, + ) + .await?; + params.server = server.id; + } + Execution::PruneSystem(params) => { let server = super::get_check_permissions::( ¶ms.server, user, diff --git a/bin/core/src/resource/repo.rs b/bin/core/src/resource/repo.rs index 54b3e5b3f..caf3e5e84 100644 --- a/bin/core/src/resource/repo.rs +++ b/bin/core/src/resource/repo.rs @@ -2,7 +2,7 @@ use std::time::Duration; use anyhow::Context; use formatting::format_serror; -use monitor_client::entities::{ +use komodo_client::entities::{ builder::Builder, permission::PermissionLevel, repo::{ @@ -11,9 +11,9 @@ use monitor_client::entities::{ }, resource::Resource, server::Server, - update::{ResourceTargetVariant, Update}, + update::Update, user::User, - Operation, + Operation, ResourceTargetVariant, }; use mungos::{ find::find_collect, @@ -30,7 +30,7 @@ use crate::{ use super::get_check_permissions; -impl 
super::MonitorResource for Repo { +impl super::KomodoResource for Repo { type Config = RepoConfig; type PartialConfig = PartialRepoConfig; type ConfigDiff = RepoConfigDiff; diff --git a/bin/core/src/resource/server.rs b/bin/core/src/resource/server.rs index ff8f4beee..08acfd7ad 100644 --- a/bin/core/src/resource/server.rs +++ b/bin/core/src/resource/server.rs @@ -1,14 +1,14 @@ use anyhow::Context; -use monitor_client::entities::{ - monitor_timestamp, +use komodo_client::entities::{ + komodo_timestamp, resource::Resource, server::{ PartialServerConfig, Server, ServerConfig, ServerConfigDiff, ServerListItem, ServerListItemInfo, ServerQuerySpecifics, }, - update::{ResourceTargetVariant, Update}, + update::Update, user::User, - Operation, + Operation, ResourceTargetVariant, }; use mungos::mongodb::{bson::doc, Collection}; @@ -17,7 +17,7 @@ use crate::{ state::{action_states, db_client, server_status_cache}, }; -impl super::MonitorResource for Server { +impl super::KomodoResource for Server { type Config = ServerConfig; type PartialConfig = PartialServerConfig; type ConfigDiff = ServerConfigDiff; @@ -163,7 +163,7 @@ impl super::MonitorResource for Server { doc! { "target.type": "Server", "target.id": &id }, doc! { "$set": { "resolved": true, - "resolved_ts": monitor_timestamp() + "resolved_ts": komodo_timestamp() } }, ) .await diff --git a/bin/core/src/resource/server_template.rs b/bin/core/src/resource/server_template.rs index 53da0e030..86d48e286 100644 --- a/bin/core/src/resource/server_template.rs +++ b/bin/core/src/resource/server_template.rs @@ -1,4 +1,4 @@ -use monitor_client::entities::{ +use komodo_client::entities::{ resource::Resource, server_template::{ PartialServerTemplateConfig, ServerTemplate, @@ -6,9 +6,9 @@ use monitor_client::entities::{ ServerTemplateConfigVariant, ServerTemplateListItem, ServerTemplateListItemInfo, ServerTemplateQuerySpecifics, }, - update::{ResourceTargetVariant, Update}, + update::Update, user::User, - MergePartial, Operation, + MergePartial, Operation, ResourceTargetVariant, }; use mungos::mongodb::{ bson::{to_document, Document}, @@ -17,7 +17,7 @@ use mungos::mongodb::{ use crate::state::db_client; -impl super::MonitorResource for ServerTemplate { +impl super::KomodoResource for ServerTemplate { type Config = ServerTemplateConfig; type PartialConfig = PartialServerTemplateConfig; type ConfigDiff = ServerTemplateConfigDiff; diff --git a/bin/core/src/resource/stack.rs b/bin/core/src/resource/stack.rs index 6b092ea78..ecef40157 100644 --- a/bin/core/src/resource/stack.rs +++ b/bin/core/src/resource/stack.rs @@ -1,6 +1,6 @@ use anyhow::Context; use formatting::format_serror; -use monitor_client::{ +use komodo_client::{ api::write::RefreshStackCache, entities::{ permission::PermissionLevel, @@ -11,9 +11,9 @@ use monitor_client::{ StackInfo, StackListItem, StackListItemInfo, StackQuerySpecifics, StackState, }, - update::{ResourceTargetVariant, Update}, + update::Update, user::{stack_user, User}, - Operation, + Operation, ResourceTargetVariant, }, }; use mungos::mongodb::Collection; @@ -31,7 +31,7 @@ use crate::{ use super::get_check_permissions; -impl super::MonitorResource for Stack { +impl super::KomodoResource for Stack { type Config = StackConfig; type PartialConfig = PartialStackConfig; type ConfigDiff = StackConfigDiff; diff --git a/bin/core/src/resource/sync.rs b/bin/core/src/resource/sync.rs index 6837258d1..4b727b692 100644 --- a/bin/core/src/resource/sync.rs +++ b/bin/core/src/resource/sync.rs @@ -1,9 +1,8 @@ use std::time::Duration; use 
anyhow::Context; -use mongo_indexed::doc; -use monitor_client::entities::{ - monitor_timestamp, +use komodo_client::entities::{ + komodo_timestamp, resource::Resource, sync::{ PartialResourceSyncConfig, PendingSyncUpdatesData, ResourceSync, @@ -11,10 +10,11 @@ use monitor_client::entities::{ ResourceSyncListItem, ResourceSyncListItemInfo, ResourceSyncQuerySpecifics, ResourceSyncState, }, - update::{ResourceTargetVariant, Update}, + update::Update, user::User, - Operation, + Operation, ResourceTargetVariant, }; +use mongo_indexed::doc; use mungos::{ find::find_collect, mongodb::{options::FindOneOptions, Collection}, @@ -24,7 +24,7 @@ use crate::state::{ action_states, db_client, resource_sync_state_cache, }; -impl super::MonitorResource for ResourceSync { +impl super::KomodoResource for ResourceSync { type Config = ResourceSyncConfig; type PartialConfig = PartialResourceSyncConfig; type ConfigDiff = ResourceSyncConfigDiff; @@ -135,7 +135,7 @@ impl super::MonitorResource for ResourceSync { doc! { "target.type": "ResourceSync", "target.id": &resource.id }, doc! { "$set": { "resolved": true, - "resolved_ts": monitor_timestamp() + "resolved_ts": komodo_timestamp() } }, ) .await diff --git a/bin/core/src/state.rs b/bin/core/src/state.rs index 73fde28c4..db3d18a86 100644 --- a/bin/core/src/state.rs +++ b/bin/core/src/state.rs @@ -4,7 +4,7 @@ use std::{ }; use anyhow::Context; -use monitor_client::entities::{ +use komodo_client::entities::{ build::BuildState, config::core::{CoreConfig, GithubWebhookAppConfig}, deployment::DeploymentState, diff --git a/bin/core/src/ws.rs b/bin/core/src/ws.rs index 835567d04..d4b673e03 100644 --- a/bin/core/src/ws.rs +++ b/bin/core/src/ws.rs @@ -9,9 +9,9 @@ use axum::{ Router, }; use futures::{SinkExt, StreamExt}; -use monitor_client::{ +use komodo_client::{ entities::{ - permission::PermissionLevel, update::ResourceTarget, user::User, + permission::PermissionLevel, user::User, ResourceTarget, }, ws::WsLoginMessage, }; diff --git a/bin/migrator/Cargo.toml b/bin/migrator/Cargo.toml index a0b27016f..4e88f3462 100644 --- a/bin/migrator/Cargo.toml +++ b/bin/migrator/Cargo.toml @@ -10,7 +10,7 @@ repository.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -monitor_client.workspace = true +komodo_client.workspace = true logger.workspace = true # mungos.workspace = true @@ -20,5 +20,4 @@ anyhow.workspace = true dotenvy.workspace = true envy.workspace = true serde.workspace = true -tracing.workspace = true -chrono = "0.4" \ No newline at end of file +tracing.workspace = true \ No newline at end of file diff --git a/bin/migrator/Dockerfile b/bin/migrator/Dockerfile index b6366d5f3..a6872527a 100644 --- a/bin/migrator/Dockerfile +++ b/bin/migrator/Dockerfile @@ -9,8 +9,8 @@ FROM gcr.io/distroless/cc-debian12 COPY --from=builder /builder/target/release/migrator / # Label for Ghcr -LABEL org.opencontainers.image.source=https://github.com/mbecker20/monitor -LABEL org.opencontainers.image.description="Database migrator for Monitor version upgrades" +LABEL org.opencontainers.image.source=https://github.com/mbecker20/komodo +LABEL org.opencontainers.image.description="Database migrator for Komodo version upgrades" LABEL org.opencontainers.image.licenses=GPL-3.0 CMD ["./migrator"] \ No newline at end of file diff --git a/bin/migrator/README.md b/bin/migrator/README.md index 412158644..16627f501 100644 --- a/bin/migrator/README.md +++ b/bin/migrator/README.md @@ -1,42 +1,25 @@ # Migrator -Performs schema 
changes on the Monitor database +Performs schema changes on the Komodo database ## v1.7 - v1.11 migration Run this before upgrading to latest from versions 1.7 to 1.11. ```sh -docker run --rm --name monitor-migrator \ +docker run --rm --name komodo-migrator \ --network "host" \ --env MIGRATION="v1.11" \ --env TARGET_URI="mongodb://:@
" \ --env TARGET_DB_NAME="" \ - ghcr.io/mbecker20/monitor_migrator + ghcr.io/mbecker20/komodo_migrator ``` ## v1.0 - v1.6 migration Run this before upgrading to latest from versions 1.0 to 1.6. ```sh -docker run --rm --name monitor-migrator \ +docker run --rm --name komodo-migrator \ --network "host" \ --env MIGRATION="v1.6" \ --env TARGET_URI="mongodb://:@
" \ --env TARGET_DB_NAME="" \ - ghcr.io/mbecker20/monitor_migrator -``` - -## v0.X migration -Run this before upgrading to latest from version 0.X. - -Note. As this is a major upgrade, this migration is not "in place". -It will create another database (TARGET) and migrate resources over, leaving the original database (LEGACY) unchanged. - -```sh -docker run --rm --name monitor-migrator \ - --network "host" \ - --env MIGRATION="v0" \ - --env TARGET_URI="mongodb://:@
" \ - --env TARGET_DB_NAME="" \ - --env LEGACY_URI="mongodb://:@
" \ - --env LEGACY_DB_NAME="" \ - ghcr.io/mbecker20/monitor_migrator + ghcr.io/mbecker20/komodo_migrator ``` \ No newline at end of file diff --git a/bin/migrator/src/legacy/mod.rs b/bin/migrator/src/legacy/mod.rs index 6324536d0..1b7141894 100644 --- a/bin/migrator/src/legacy/mod.rs +++ b/bin/migrator/src/legacy/mod.rs @@ -1,4 +1,3 @@ #[allow(unused)] -pub mod v0; pub mod v1_11; pub mod v1_6; diff --git a/bin/migrator/src/legacy/v0/build.rs b/bin/migrator/src/legacy/v0/build.rs deleted file mode 100644 index 0416c9add..000000000 --- a/bin/migrator/src/legacy/v0/build.rs +++ /dev/null @@ -1,253 +0,0 @@ -use anyhow::{anyhow, Context}; -use monitor_client::entities::build::{ - BuildConfig, BuildInfo, ImageRegistry, StandardRegistryConfig, -}; -use mungos::mongodb::bson::serde_helpers::hex_string_as_object_id; -use serde::{Deserialize, Serialize}; - -use super::{ - unix_from_monitor_ts, Command, EnvironmentVar, PermissionsMap, -}; - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct Build { - #[serde( - default, - rename = "_id", - skip_serializing_if = "String::is_empty", - with = "hex_string_as_object_id" - )] - pub id: String, - - pub name: String, - - #[serde(default)] - pub description: String, - - #[serde(default)] - pub permissions: PermissionsMap, - - #[serde(default)] - pub skip_secret_interp: bool, - - pub server_id: Option, // server which this image should be built on - - pub aws_config: Option, - - pub version: Version, - - // git related - pub repo: Option, - - pub branch: Option, - - pub github_account: Option, - - // build related - pub pre_build: Option, - - pub docker_build_args: Option, - - pub docker_account: Option, - - pub docker_organization: Option, - - #[serde(default, skip_serializing_if = "String::is_empty")] - pub last_built_at: String, - - #[serde(default, skip_serializing_if = "String::is_empty")] - pub created_at: String, - #[serde(default)] - pub updated_at: String, -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct BuildActionState { - pub building: bool, - pub updating: bool, -} - -#[derive( - Serialize, Deserialize, Debug, Clone, Default, PartialEq, -)] -pub struct Version { - pub major: i32, - pub minor: i32, - pub patch: i32, -} - -impl std::fmt::Display for Version { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(&format!( - "{}.{}.{}", - self.major, self.minor, self.patch - )) - } -} - -impl TryFrom<&str> for Version { - type Error = anyhow::Error; - - fn try_from(value: &str) -> Result { - let vals = value - .split('.') - .map(|v| { - anyhow::Ok( - v.parse().context("failed at parsing value into i32")?, - ) - }) - .collect::>>()?; - let version = Version { - major: *vals - .first() - .ok_or(anyhow!("must include at least major version"))?, - minor: *vals.get(1).unwrap_or(&0), - patch: *vals.get(2).unwrap_or(&0), - }; - Ok(version) - } -} - -impl Version { - pub fn increment(&mut self) { - self.patch += 1; - } -} - -impl From for monitor_client::entities::Version { - fn from(value: Version) -> Self { - Self { - major: value.major, - minor: value.minor, - patch: value.patch, - } - } -} - -#[derive( - Serialize, Deserialize, Debug, Clone, PartialEq, Default, -)] -pub struct DockerBuildArgs { - pub build_path: String, - pub dockerfile_path: Option, - #[serde(default)] - pub build_args: Vec, - #[serde(default)] - pub extra_args: Vec, - #[serde(default)] - pub use_buildx: bool, -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct BuildVersionsReponse { - pub 
version: Version, - pub ts: String, -} - -#[derive( - Serialize, Deserialize, Debug, Clone, PartialEq, Default, -)] -pub struct AwsBuilderBuildConfig { - pub region: Option, - - pub instance_type: Option, - - pub ami_name: Option, - - pub volume_gb: Option, - - pub subnet_id: Option, - - pub security_group_ids: Option>, - - pub key_pair_name: Option, - - pub assign_public_ip: Option, -} - -impl TryFrom for monitor_client::entities::build::Build { - type Error = anyhow::Error; - fn try_from(value: Build) -> Result { - let ( - build_path, - dockerfile_path, - build_args, - extra_args, - use_buildx, - ) = value - .docker_build_args - .map(|args| { - ( - args.build_path, - args.dockerfile_path.unwrap_or_default(), - args - .build_args - .into_iter() - .map(|arg| monitor_client::entities::EnvironmentVar { - variable: arg.variable, - value: arg.value, - }) - .collect::>(), - args.extra_args, - args.use_buildx, - ) - }) - .unwrap_or_default(); - - let build = Self { - id: value.id, - name: value.name, - description: value.description, - updated_at: unix_from_monitor_ts(&value.updated_at)?, - tags: Vec::new(), - info: BuildInfo { - last_built_at: unix_from_monitor_ts(&value.last_built_at)?, - built_hash: None, - built_message: None, - latest_hash: None, - latest_message: None, - }, - base_permission: Default::default(), - config: BuildConfig { - builder_id: String::new(), - skip_secret_interp: value.skip_secret_interp, - version: value.version.into(), - auto_increment_version: true, - image_name: Default::default(), - image_tag: Default::default(), - git_provider: String::from("github.com"), - git_https: true, - repo: value.repo.unwrap_or_default(), - branch: value.branch.unwrap_or_default(), - git_account: value.github_account.unwrap_or_default(), - image_registry: ImageRegistry::Standard( - StandardRegistryConfig { - domain: String::from("docker.io"), - account: value.docker_account.unwrap_or_default(), - organization: value - .docker_organization - .unwrap_or_default(), - }, - ), - pre_build: value - .pre_build - .map(|command| monitor_client::entities::SystemCommand { - path: command.path, - command: command.command, - }) - .unwrap_or_default(), - build_path, - dockerfile_path, - build_args, - secret_args: Default::default(), - extra_args, - use_buildx, - labels: Default::default(), - webhook_enabled: true, - webhook_secret: Default::default(), - commit: Default::default(), - }, - }; - Ok(build) - } -} diff --git a/bin/migrator/src/legacy/v0/config.rs b/bin/migrator/src/legacy/v0/config.rs deleted file mode 100644 index 027a7d261..000000000 --- a/bin/migrator/src/legacy/v0/config.rs +++ /dev/null @@ -1,201 +0,0 @@ -use std::{collections::HashMap, net::IpAddr, path::PathBuf}; - -use serde::{Deserialize, Serialize}; - -use super::Timelength; - -pub type GithubUsername = String; -pub type GithubToken = String; -pub type GithubAccounts = HashMap; - -pub type DockerUsername = String; -pub type DockerToken = String; -pub type DockerAccounts = HashMap; - -pub type SecretsMap = HashMap; // these are used for injection into deployments run commands - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct CoreConfig { - #[serde(default = "default_title")] - pub title: String, - - // the host to use with oauth redirect url, whatever host the user hits to access monitor. 
eg 'https://monitor.mogh.tech' - pub host: String, - - // port the core web server runs on - #[serde(default = "default_core_port")] - pub port: u16, - - pub jwt_secret: String, - - #[serde(default = "default_jwt_valid_for")] - pub jwt_valid_for: Timelength, - - // interval at which to collect server stats and alert for out of bounds - pub monitoring_interval: Timelength, - - // daily utc offset in hours to run daily update. eg 8:00 eastern time is 13:00 UTC, so offset should be 13. default of 0 runs at UTC midnight. - #[serde(default)] - pub daily_offset_hours: u8, - - // number of days to keep stats, or 0 to disable pruning. stats older than this number of days are deleted on a daily cycle - #[serde(default)] - pub keep_stats_for_days: u64, - - // used to verify validity from github webhooks - pub github_webhook_secret: String, - - // used to form the frontend listener url, if None will use 'host'. - pub github_webhook_base_url: Option, - - // sent in auth header with req to periphery - pub passkey: String, - - // integration with slack app - pub slack_url: Option, - - // enable login with local auth - pub local_auth: bool, - - // allowed docker orgs used with monitor. first in this list will be default for build - #[serde(default)] - pub docker_organizations: Vec, - - pub mongo: MongoConfig, - - #[serde(default)] - pub github_oauth: OauthCredentials, - - #[serde(default)] - pub google_oauth: OauthCredentials, - - #[serde(default)] - pub aws: AwsBuilderConfig, -} - -fn default_title() -> String { - String::from("monitor") -} - -fn default_core_port() -> u16 { - 9000 -} - -fn default_jwt_valid_for() -> Timelength { - Timelength::OneWeek -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct OauthCredentials { - #[serde(default)] - pub enabled: bool, - #[serde(default)] - pub id: String, - #[serde(default)] - pub secret: String, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct MongoConfig { - pub uri: String, - #[serde(default = "default_core_mongo_app_name")] - pub app_name: String, - #[serde(default = "default_core_mongo_db_name")] - pub db_name: String, -} - -fn default_core_mongo_app_name() -> String { - "monitor_core".to_string() -} - -fn default_core_mongo_db_name() -> String { - "monitor".to_string() -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct AwsBuilderConfig { - #[serde(skip_serializing)] - pub access_key_id: String, - - #[serde(skip_serializing)] - pub secret_access_key: String, - - pub default_ami_name: String, - pub default_subnet_id: String, - pub default_key_pair_name: String, - - #[serde(default)] - pub available_ami_accounts: AvailableAmiAccounts, - - #[serde(default = "default_aws_region")] - pub default_region: String, - - #[serde(default = "default_volume_gb")] - pub default_volume_gb: i32, - - #[serde(default = "default_instance_type")] - pub default_instance_type: String, - - #[serde(default)] - pub default_security_group_ids: Vec, - - #[serde(default)] - pub default_assign_public_ip: bool, -} - -fn default_aws_region() -> String { - String::from("us-east-1") -} - -fn default_volume_gb() -> i32 { - 8 -} - -fn default_instance_type() -> String { - String::from("m5.2xlarge") -} - -pub type AvailableAmiAccounts = HashMap; // (ami_name, AmiAccounts) - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct AmiAccounts { - pub ami_id: String, - #[serde(default)] - pub github: Vec, - #[serde(default)] - pub docker: Vec, - #[serde(default)] - pub secrets: Vec, -} - -#[derive(Serialize, 
Deserialize, Debug, Clone)] -pub struct PeripheryConfig { - #[serde(default = "default_periphery_port")] - pub port: u16, - #[serde(default = "default_repo_dir")] - pub repo_dir: PathBuf, - #[serde(default = "default_stats_refresh_interval")] - pub stats_polling_rate: Timelength, - #[serde(default)] - pub allowed_ips: Vec, - #[serde(default)] - pub passkeys: Vec, - #[serde(default)] - pub secrets: SecretsMap, - #[serde(default)] - pub github_accounts: GithubAccounts, - #[serde(default)] - pub docker_accounts: DockerAccounts, -} - -fn default_periphery_port() -> u16 { - 8000 -} - -fn default_repo_dir() -> PathBuf { - "/repos".parse().unwrap() -} - -fn default_stats_refresh_interval() -> Timelength { - Timelength::FiveSeconds -} diff --git a/bin/migrator/src/legacy/v0/deployment.rs b/bin/migrator/src/legacy/v0/deployment.rs deleted file mode 100644 index c7812a8db..000000000 --- a/bin/migrator/src/legacy/v0/deployment.rs +++ /dev/null @@ -1,386 +0,0 @@ -use monitor_client::entities::{ - build::{ImageRegistry, StandardRegistryConfig}, - NoData, -}; -use mungos::mongodb::bson::serde_helpers::hex_string_as_object_id; -use serde::{Deserialize, Serialize}; - -use crate::legacy::v0::unix_from_monitor_ts; - -use super::{Command, EnvironmentVar, PermissionsMap, Version}; - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct Deployment { - #[serde( - default, - rename = "_id", - skip_serializing_if = "String::is_empty", - with = "hex_string_as_object_id" - )] - pub id: String, - - pub name: String, // must be formatted to be compat with docker - - #[serde(default)] - pub description: String, - - pub server_id: String, - - #[serde(default)] - pub permissions: PermissionsMap, - - #[serde(default)] - pub skip_secret_interp: bool, - - pub docker_run_args: DockerRunArgs, - - #[serde(default = "default_term_signal_labels")] - pub term_signal_labels: Vec, - - #[serde(default)] - pub termination_signal: TerminationSignal, - - #[serde(default = "default_termination_timeout")] - pub termination_timeout: i32, - - pub build_id: Option, - - #[serde(default)] - pub redeploy_on_build: bool, - - pub build_version: Option, - - // deployment repo related - pub repo: Option, - - pub branch: Option, - - pub github_account: Option, - - pub on_clone: Option, - - pub on_pull: Option, - - pub repo_mount: Option, - - #[serde(default, skip_serializing_if = "String::is_empty")] - pub created_at: String, - #[serde(default)] - pub updated_at: String, -} - -fn default_termination_timeout() -> i32 { - 10 -} - -fn default_term_signal_labels() -> Vec { - vec![TerminationSignalLabel::default()] -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct DeploymentWithContainerState { - pub deployment: Deployment, - pub state: DockerContainerState, - pub container: Option, -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct DeploymentActionState { - pub deploying: bool, - pub stopping: bool, - pub starting: bool, - pub removing: bool, - pub pulling: bool, - pub recloning: bool, - pub updating: bool, - pub renaming: bool, -} - -#[derive( - Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, -)] -pub struct TerminationSignalLabel { - pub signal: TerminationSignal, - pub label: String, -} - -impl From - for monitor_client::entities::deployment::TerminationSignalLabel -{ - fn from(value: TerminationSignalLabel) -> Self { - Self { - signal: value.signal.into(), - label: value.label, - } - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct DockerRunArgs { - 
pub image: String, - - #[serde(default)] - pub ports: Vec, - - #[serde(default)] - pub volumes: Vec, - - #[serde(default)] - pub environment: Vec, - - #[serde(default = "default_network")] - pub network: String, - - #[serde(default)] - pub restart: RestartMode, - - pub post_image: Option, - - pub container_user: Option, - - #[serde(default)] - pub extra_args: Vec, - - pub docker_account: Option, // the username of the dockerhub account -} - -impl Default for DockerRunArgs { - fn default() -> DockerRunArgs { - DockerRunArgs { - network: "host".to_string(), - image: Default::default(), - ports: Default::default(), - volumes: Default::default(), - environment: Default::default(), - restart: Default::default(), - post_image: Default::default(), - container_user: Default::default(), - extra_args: Default::default(), - docker_account: Default::default(), - } - } -} - -fn default_network() -> String { - String::from("host") -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct BasicContainerInfo { - pub name: String, - pub id: String, - pub image: String, - pub state: DockerContainerState, - pub status: Option, -} - -#[derive( - Serialize, Deserialize, Debug, Clone, Default, PartialEq, -)] -pub struct Conversion { - pub local: String, - pub container: String, -} - -impl From - for monitor_client::entities::deployment::Conversion -{ - fn from(value: Conversion) -> Self { - Self { - local: value.local, - container: value.container, - } - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct DockerContainerStats { - #[serde(alias = "Name")] - pub name: String, - #[serde(alias = "CPUPerc")] - pub cpu_perc: String, - #[serde(alias = "MemPerc")] - pub mem_perc: String, - #[serde(alias = "MemUsage")] - pub mem_usage: String, - #[serde(alias = "NetIO")] - pub net_io: String, - #[serde(alias = "BlockIO")] - pub block_io: String, - #[serde(alias = "PIDs")] - pub pids: String, -} - -#[derive( - Serialize, - Deserialize, - Debug, - PartialEq, - Hash, - Eq, - Clone, - Copy, - Default, -)] -#[serde(rename_all = "snake_case")] -pub enum DockerContainerState { - #[default] - Unknown, - NotDeployed, - Created, - Restarting, - Running, - Removing, - Paused, - Exited, - Dead, -} - -#[derive( - Serialize, - Deserialize, - Debug, - PartialEq, - Hash, - Eq, - Clone, - Copy, - Default, -)] -pub enum RestartMode { - #[default] - #[serde(rename = "no")] - NoRestart, - #[serde(rename = "on-failure")] - OnFailure, - #[serde(rename = "always")] - Always, - #[serde(rename = "unless-stopped")] - UnlessStopped, -} - -impl From - for monitor_client::entities::deployment::RestartMode -{ - fn from(value: RestartMode) -> Self { - use monitor_client::entities::deployment::RestartMode::*; - match value { - RestartMode::NoRestart => NoRestart, - RestartMode::OnFailure => OnFailure, - RestartMode::Always => Always, - RestartMode::UnlessStopped => UnlessStopped, - } - } -} - -#[derive( - Serialize, - Deserialize, - Debug, - PartialEq, - Hash, - Eq, - Clone, - Copy, - Default, -)] -#[serde(rename_all = "UPPERCASE")] -#[allow(clippy::enum_variant_names)] -pub enum TerminationSignal { - #[serde(alias = "1")] - SigHup, - #[serde(alias = "2")] - SigInt, - #[serde(alias = "3")] - SigQuit, - #[default] - #[serde(alias = "15")] - SigTerm, -} - -impl From - for monitor_client::entities::deployment::TerminationSignal -{ - fn from(value: TerminationSignal) -> Self { - use monitor_client::entities::deployment::TerminationSignal::*; - match value { - TerminationSignal::SigHup => SigHup, - TerminationSignal::SigInt => 
SigInt, - TerminationSignal::SigQuit => SigQuit, - TerminationSignal::SigTerm => SigTerm, - } - } -} - -impl TryFrom - for monitor_client::entities::deployment::Deployment -{ - type Error = anyhow::Error; - fn try_from(value: Deployment) -> Result { - let image = if let Some(build_id) = value.build_id { - monitor_client::entities::deployment::DeploymentImage::Build { - build_id, - version: value.build_version.unwrap_or_default().into(), - } - } else { - monitor_client::entities::deployment::DeploymentImage::Image { - image: value.docker_run_args.image, - } - }; - let deployment = Self { - id: value.id, - name: value.name, - description: value.description, - base_permission: Default::default(), - updated_at: unix_from_monitor_ts(&value.updated_at)?, - tags: Vec::new(), - info: (), - config: - monitor_client::entities::deployment::DeploymentConfig { - server_id: value.server_id, - send_alerts: true, - image, - skip_secret_interp: value.skip_secret_interp, - redeploy_on_build: value.redeploy_on_build, - term_signal_labels: value - .term_signal_labels - .into_iter() - .map(|t| t.into()) - .collect(), - termination_signal: value.termination_signal.into(), - termination_timeout: value.termination_timeout, - ports: value - .docker_run_args - .ports - .into_iter() - .map(|p| p.into()) - .collect(), - volumes: value - .docker_run_args - .volumes - .into_iter() - .map(|v| v.into()) - .collect(), - environment: value - .docker_run_args - .environment - .into_iter() - .map(|e| e.into()) - .collect(), - network: value.docker_run_args.network, - restart: value.docker_run_args.restart.into(), - command: value - .docker_run_args - .post_image - .unwrap_or_default(), - extra_args: value.docker_run_args.extra_args, - image_registry_account: value - .docker_run_args - .docker_account - .unwrap_or_default(), - labels: Default::default(), - }, - }; - Ok(deployment) - } -} diff --git a/bin/migrator/src/legacy/v0/mod.rs b/bin/migrator/src/legacy/v0/mod.rs deleted file mode 100644 index 720631ac0..000000000 --- a/bin/migrator/src/legacy/v0/mod.rs +++ /dev/null @@ -1,342 +0,0 @@ -use std::collections::HashMap; - -use anyhow::Context; -use chrono::DateTime; -use mungos::{init::MongoBuilder, mongodb::Collection}; -use serde::{Deserialize, Serialize}; - -mod build; -mod config; -mod deployment; -mod server; -mod update; -mod user; - -pub use build::*; -pub use config::*; -pub use deployment::*; -pub use server::*; -pub use update::*; -pub use user::*; - -pub struct DbClient { - pub users: Collection, - pub servers: Collection, - pub deployments: Collection, - pub builds: Collection, - pub updates: Collection, -} - -impl DbClient { - pub async fn new( - legacy_uri: &str, - legacy_db_name: &str, - ) -> DbClient { - let client = MongoBuilder::default() - .uri(legacy_uri) - .build() - .await - .expect("failed to init legacy mongo client"); - let db = client.database(legacy_db_name); - DbClient { - users: db.collection("users"), - servers: db.collection("servers"), - deployments: db.collection("deployments"), - builds: db.collection("builds"), - updates: db.collection("updates"), - } - } -} - -pub type PermissionsMap = HashMap; - -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct CloneArgs { - pub name: String, - pub repo: Option, - pub branch: Option, - pub on_clone: Option, - pub on_pull: Option, - pub github_account: Option, -} - -#[derive( - Serialize, Deserialize, Debug, Default, Clone, PartialEq, Eq, -)] -pub struct Command { - #[serde(default)] - pub path: String, - #[serde(default)] - pub command: 
String, -} - -#[derive( - Serialize, Deserialize, Debug, Clone, Default, PartialEq, -)] -pub struct EnvironmentVar { - pub variable: String, - pub value: String, -} - -impl From - for monitor_client::entities::EnvironmentVar -{ - fn from(value: EnvironmentVar) -> Self { - Self { - variable: value.variable, - value: value.value, - } - } -} - -#[derive(Deserialize, Debug)] -pub struct UserCredentials { - pub username: String, - pub password: String, -} - -#[derive( - Serialize, Deserialize, Debug, PartialEq, Hash, Eq, Clone, Copy, -)] -#[serde(rename_all = "snake_case")] -pub enum AccountType { - Github, - Docker, -} - -#[derive( - Serialize, - Deserialize, - Debug, - Default, - PartialEq, - Hash, - Eq, - Clone, - Copy, -)] -#[serde(rename_all = "snake_case")] -pub enum Operation { - // do nothing - #[default] - None, - - // server - CreateServer, - UpdateServer, - DeleteServer, - PruneImagesServer, - PruneContainersServer, - PruneNetworksServer, - RenameServer, - - // build - CreateBuild, - UpdateBuild, - DeleteBuild, - BuildBuild, - - // deployment - CreateDeployment, - UpdateDeployment, - DeleteDeployment, - DeployContainer, - StopContainer, - StartContainer, - RemoveContainer, - PullDeployment, - RecloneDeployment, - RenameDeployment, - - // procedure - CreateProcedure, - UpdateProcedure, - DeleteProcedure, - - // command - CreateCommand, - UpdateCommand, - DeleteCommand, - RunCommand, - - // group - CreateGroup, - UpdateGroup, - DeleteGroup, - - // user - ModifyUserEnabled, - ModifyUserCreateServerPermissions, - ModifyUserCreateBuildPermissions, - ModifyUserPermissions, - - // github webhook automation - AutoBuild, - AutoPull, -} - -impl From for monitor_client::entities::Operation { - fn from(value: Operation) -> Self { - use monitor_client::entities::Operation::*; - match value { - Operation::None => None, - Operation::CreateServer => CreateServer, - Operation::UpdateServer => UpdateServer, - Operation::DeleteServer => DeleteServer, - Operation::PruneImagesServer => PruneImages, - Operation::PruneContainersServer => PruneContainers, - Operation::PruneNetworksServer => PruneNetworks, - Operation::RenameServer => RenameServer, - Operation::CreateBuild => CreateBuild, - Operation::UpdateBuild => UpdateBuild, - Operation::DeleteBuild => DeleteBuild, - Operation::BuildBuild => RunBuild, - Operation::CreateDeployment => CreateDeployment, - Operation::UpdateDeployment => UpdateDeployment, - Operation::DeleteDeployment => DeleteDeployment, - Operation::DeployContainer => Deploy, - Operation::StopContainer => StopContainer, - Operation::StartContainer => StartContainer, - Operation::RemoveContainer => RemoveContainer, - Operation::PullDeployment => None, - Operation::RecloneDeployment => None, - Operation::RenameDeployment => RenameDeployment, - Operation::CreateProcedure => None, - Operation::UpdateProcedure => None, - Operation::DeleteProcedure => None, - Operation::CreateCommand => None, - Operation::UpdateCommand => None, - Operation::DeleteCommand => None, - Operation::RunCommand => None, - Operation::CreateGroup => None, - Operation::UpdateGroup => None, - Operation::DeleteGroup => None, - Operation::ModifyUserEnabled => None, - Operation::ModifyUserCreateServerPermissions => None, - Operation::ModifyUserCreateBuildPermissions => None, - Operation::ModifyUserPermissions => None, - Operation::AutoBuild => RunBuild, - Operation::AutoPull => PullRepo, - } - } -} - -#[derive( - Serialize, - Deserialize, - Debug, - Hash, - Clone, - Copy, - PartialEq, - Eq, - PartialOrd, - Ord, - Default, -)] 
-#[serde(rename_all = "snake_case")] -pub enum PermissionLevel { - #[default] - None, - Read, - Execute, - Update, -} - -impl Default for &PermissionLevel { - fn default() -> Self { - &PermissionLevel::None - } -} - -impl From - for monitor_client::entities::permission::PermissionLevel -{ - fn from(value: PermissionLevel) -> Self { - use monitor_client::entities::permission::PermissionLevel::*; - match value { - PermissionLevel::None => None, - PermissionLevel::Read => Read, - PermissionLevel::Execute => Execute, - PermissionLevel::Update => Write, - } - } -} - -#[derive( - Serialize, Deserialize, Debug, PartialEq, Hash, Eq, Clone, Copy, -)] -#[serde(rename_all = "snake_case")] -pub enum PermissionsTarget { - Server, - Deployment, - Build, - Procedure, - Group, -} - -#[derive( - Serialize, - Deserialize, - Debug, - PartialEq, - Hash, - Eq, - Clone, - Copy, - Default, -)] -#[serde(rename_all = "snake_case")] -pub enum Timelength { - #[serde(rename = "1-sec")] - OneSecond, - #[serde(rename = "5-sec")] - FiveSeconds, - #[serde(rename = "10-sec")] - TenSeconds, - #[serde(rename = "15-sec")] - FifteenSeconds, - #[serde(rename = "30-sec")] - ThirtySeconds, - #[default] - #[serde(rename = "1-min")] - OneMinute, - #[serde(rename = "2-min")] - TwoMinutes, - #[serde(rename = "5-min")] - FiveMinutes, - #[serde(rename = "10-min")] - TenMinutes, - #[serde(rename = "15-min")] - FifteenMinutes, - #[serde(rename = "30-min")] - ThirtyMinutes, - #[serde(rename = "1-hr")] - OneHour, - #[serde(rename = "2-hr")] - TwoHours, - #[serde(rename = "6-hr")] - SixHours, - #[serde(rename = "8-hr")] - EightHours, - #[serde(rename = "12-hr")] - TwelveHours, - #[serde(rename = "1-day")] - OneDay, - #[serde(rename = "3-day")] - ThreeDay, - #[serde(rename = "1-wk")] - OneWeek, - #[serde(rename = "2-wk")] - TwoWeeks, - #[serde(rename = "30-day")] - ThirtyDays, -} - -pub fn unix_from_monitor_ts(ts: &str) -> anyhow::Result { - Ok( - DateTime::parse_from_rfc3339(ts) - .context("failed to parse rfc3339 timestamp")? 
- .timestamp_millis(), - ) -} diff --git a/bin/migrator/src/legacy/v0/server.rs b/bin/migrator/src/legacy/v0/server.rs deleted file mode 100644 index 032695ae2..000000000 --- a/bin/migrator/src/legacy/v0/server.rs +++ /dev/null @@ -1,321 +0,0 @@ -use std::path::PathBuf; - -use mungos::mongodb::bson::serde_helpers::hex_string_as_object_id; -use serde::{Deserialize, Serialize}; - -use super::{unix_from_monitor_ts, PermissionsMap, Timelength}; - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct Server { - #[serde( - default, - rename = "_id", - skip_serializing_if = "String::is_empty", - with = "hex_string_as_object_id" - )] - pub id: String, - - pub name: String, - - #[serde(default)] - pub description: String, - - pub address: String, - - #[serde(default)] - pub permissions: PermissionsMap, - - pub enabled: bool, - - #[serde(default)] - pub to_notify: Vec, // slack users to notify - - #[serde(default)] - pub auto_prune: bool, - - #[serde(default = "default_cpu_alert")] - pub cpu_alert: f32, - - #[serde(default = "default_mem_alert")] - pub mem_alert: f64, - - #[serde(default = "default_disk_alert")] - pub disk_alert: f64, - - #[serde(default)] - pub stats_interval: Timelength, - - pub region: Option, - - pub instance_id: Option, - - #[serde(default, skip_serializing_if = "String::is_empty")] - pub created_at: String, - #[serde(default)] - pub updated_at: String, -} - -impl Default for Server { - fn default() -> Self { - Self { - id: Default::default(), - name: Default::default(), - address: Default::default(), - permissions: Default::default(), - enabled: true, - auto_prune: true, - to_notify: Default::default(), - cpu_alert: default_cpu_alert(), - mem_alert: default_mem_alert(), - disk_alert: default_disk_alert(), - stats_interval: Default::default(), - region: Default::default(), - instance_id: Default::default(), - description: Default::default(), - created_at: Default::default(), - updated_at: Default::default(), - } - } -} - -fn default_cpu_alert() -> f32 { - 95.0 -} - -fn default_mem_alert() -> f64 { - 80.0 -} - -fn default_disk_alert() -> f64 { - 75.0 -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct ServerWithStatus { - pub server: Server, - pub status: ServerStatus, -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct ServerActionState { - pub pruning_networks: bool, - pub pruning_containers: bool, - pub pruning_images: bool, -} - -#[derive( - Serialize, - Deserialize, - Debug, - PartialEq, - Hash, - Eq, - Clone, - Copy, - Default, -)] -#[serde(rename_all = "snake_case")] -pub enum ServerStatus { - Ok, - #[default] - NotOk, - Disabled, -} - -#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default)] -pub struct SystemStatsQuery { - #[serde(default)] - pub cpus: bool, - #[serde(default)] - pub disks: bool, - #[serde(default)] - pub networks: bool, - #[serde(default)] - pub components: bool, - #[serde(default)] - pub processes: bool, -} - -#[derive(Serialize, Deserialize, Debug, Default, Clone)] -pub struct SystemStats { - #[serde(default)] - pub system_load: f64, - pub cpu_perc: f32, - pub cpu_freq_mhz: f64, - pub mem_used_gb: f64, // in GB - pub mem_total_gb: f64, // in GB - pub disk: DiskUsage, - #[serde(default)] - pub cpus: Vec, - #[serde(default)] - pub networks: Vec, - #[serde(default)] - pub components: Vec, - #[serde(default)] - pub processes: Vec, - pub polling_rate: Timelength, - pub refresh_ts: u128, - pub refresh_list_ts: u128, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct SingleCpuUsage { - pub 
name: String, - pub usage: f32, -} - -#[derive(Serialize, Deserialize, Debug, Default, Clone)] -pub struct DiskUsage { - pub used_gb: f64, // in GB - pub total_gb: f64, // in GB - pub read_kb: f64, // in kB - pub write_kb: f64, // in kB - #[serde(default)] - pub disks: Vec, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct SingleDiskUsage { - pub mount: PathBuf, - pub used_gb: f64, // in GB - pub total_gb: f64, // in GB -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct SystemNetwork { - pub name: String, - pub recieved_kb: f64, // in kB - pub transmitted_kb: f64, // in kB -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct SystemComponent { - pub label: String, - pub temp: f32, - pub max: f32, - #[serde(skip_serializing_if = "Option::is_none")] - pub critical: Option, -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct SystemProcess { - pub pid: u32, - pub name: String, - #[serde(default, skip_serializing_if = "String::is_empty")] - pub exe: String, - pub cmd: Vec, - #[serde(default)] - pub start_time: f64, - pub cpu_perc: f32, - pub mem_mb: f64, - pub disk_read_kb: f64, - pub disk_write_kb: f64, -} - -#[derive(Serialize, Deserialize, Debug, Default, Clone)] -pub struct SystemStatsRecord { - #[serde( - default, - rename = "_id", - skip_serializing_if = "String::is_empty", - with = "hex_string_as_object_id" - )] - pub id: String, - pub server_id: String, - pub ts: f64, // unix ts milliseconds - #[serde(default)] - pub system_load: f64, - pub cpu_perc: f32, // in % - #[serde(default)] - pub cpu_freq_mhz: f64, // in MHz - pub mem_used_gb: f64, // in GB - pub mem_total_gb: f64, // in GB - pub disk: DiskUsage, - #[serde(default)] - pub cpus: Vec, - #[serde(default)] - pub networks: Vec, - #[serde(default)] - pub components: Vec, - #[serde(default)] - pub processes: Vec, - pub polling_rate: Timelength, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct HistoricalStatsQuery { - #[serde(default = "default_interval")] - pub interval: Timelength, - #[serde(default = "default_limit")] - pub limit: f64, - #[serde(default)] - pub page: f64, - #[serde(default)] - pub networks: bool, - #[serde(default)] - pub components: bool, -} - -impl Default for HistoricalStatsQuery { - fn default() -> Self { - HistoricalStatsQuery { - interval: default_interval(), - limit: default_limit(), - page: Default::default(), - networks: Default::default(), - components: Default::default(), - } - } -} - -fn default_interval() -> Timelength { - Timelength::OneHour -} - -fn default_limit() -> f64 { - 100.0 -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct SystemInformation { - pub name: Option, - pub os: Option, - pub kernel: Option, - pub core_count: Option, - pub host_name: Option, - pub cpu_brand: String, -} - -impl TryFrom for monitor_client::entities::server::Server { - type Error = anyhow::Error; - fn try_from(value: Server) -> Result { - let server = Self { - id: value.id, - name: value.name, - description: value.description, - base_permission: Default::default(), - updated_at: unix_from_monitor_ts(&value.updated_at)?, - tags: Vec::new(), - info: (), - config: monitor_client::entities::server::ServerConfig { - address: value.address, - enabled: value.enabled, - ignore_mounts: Default::default(), - auto_prune: value.auto_prune, - send_unreachable_alerts: true, - stats_monitoring: true, - send_cpu_alerts: true, - send_mem_alerts: true, - send_disk_alerts: true, - region: value.region.unwrap_or_default(), - cpu_warning: value.cpu_alert, - 
cpu_critical: value.cpu_alert, - mem_warning: value.mem_alert, - mem_critical: value.mem_alert, - disk_warning: value.disk_alert, - disk_critical: value.disk_alert, - }, - }; - Ok(server) - } -} diff --git a/bin/migrator/src/legacy/v0/update.rs b/bin/migrator/src/legacy/v0/update.rs deleted file mode 100644 index 8aa0e367c..000000000 --- a/bin/migrator/src/legacy/v0/update.rs +++ /dev/null @@ -1,144 +0,0 @@ -use monitor_client::entities::update::ResourceTarget; -use mungos::mongodb::bson::serde_helpers::hex_string_as_object_id; -use serde::{Deserialize, Serialize}; - -use super::{unix_from_monitor_ts, Operation, Version}; - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct Update { - #[serde( - default, - rename = "_id", - skip_serializing_if = "String::is_empty", - with = "hex_string_as_object_id" - )] - pub id: String, - pub target: UpdateTarget, - pub operation: Operation, - pub logs: Vec, - pub start_ts: String, - pub end_ts: Option, - pub status: UpdateStatus, - pub success: bool, - pub operator: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub version: Option, -} - -impl TryFrom for monitor_client::entities::update::Update { - type Error = anyhow::Error; - fn try_from(value: Update) -> Result { - let target: Option = value.target.into(); - let update = Self { - id: value.id, - operation: value.operation.into(), - start_ts: unix_from_monitor_ts(&value.start_ts)?, - success: value.success, - operator: value.operator, - target: target.unwrap_or_default(), - logs: value - .logs - .into_iter() - .map(|log| log.try_into()) - .collect::, - >>()?, - end_ts: value - .end_ts - .and_then(|ts| unix_from_monitor_ts(&ts).ok()), - status: value.status.into(), - version: value.version.map(|v| v.into()).unwrap_or_default(), - commit_hash: Default::default(), - other_data: Default::default(), - }; - Ok(update) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct Log { - pub stage: String, - pub command: String, - pub stdout: String, - pub stderr: String, - pub success: bool, - pub start_ts: String, - pub end_ts: String, -} - -impl TryFrom for monitor_client::entities::update::Log { - type Error = anyhow::Error; - fn try_from(value: Log) -> Result { - Ok(Self { - stage: value.stage, - command: value.command, - stdout: value.stdout, - stderr: value.stderr, - success: value.success, - start_ts: unix_from_monitor_ts(&value.start_ts)?, - end_ts: unix_from_monitor_ts(&value.end_ts)?, - }) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -#[serde(tag = "type", content = "id")] -pub enum UpdateTarget { - #[default] - System, - Build(String), - Deployment(String), - Server(String), - Procedure(String), - Group(String), - Command(String), -} - -impl From - for Option -{ - fn from(value: UpdateTarget) -> Self { - use monitor_client::entities::update::ResourceTarget::*; - match value { - UpdateTarget::System => Some(System("system".to_string())), - UpdateTarget::Build(id) => Some(Build(id)), - UpdateTarget::Deployment(id) => Some(Deployment(id)), - UpdateTarget::Server(id) => Some(Server(id)), - UpdateTarget::Procedure(_) => None, - UpdateTarget::Group(_) => None, - UpdateTarget::Command(_) => None, - } - } -} - -#[derive( - Serialize, - Deserialize, - Debug, - PartialEq, - Hash, - Eq, - Clone, - Copy, - Default, -)] -#[serde(rename_all = "snake_case")] -pub enum UpdateStatus { - Queued, - InProgress, - #[default] - Complete, -} - -impl From - for monitor_client::entities::update::UpdateStatus -{ - fn from(value: UpdateStatus) -> 
Self { - use monitor_client::entities::update::UpdateStatus::*; - match value { - UpdateStatus::Queued => Queued, - UpdateStatus::InProgress => InProgress, - UpdateStatus::Complete => Complete, - } - } -} diff --git a/bin/migrator/src/legacy/v0/user.rs b/bin/migrator/src/legacy/v0/user.rs deleted file mode 100644 index e8682ffa9..000000000 --- a/bin/migrator/src/legacy/v0/user.rs +++ /dev/null @@ -1,115 +0,0 @@ -use anyhow::anyhow; -use monitor_client::entities::user::UserConfig; -use mungos::mongodb::bson::serde_helpers::hex_string_as_object_id; -use serde::{Deserialize, Serialize}; - -use super::unix_from_monitor_ts; - -#[derive(Serialize, Deserialize, Debug, Clone, Default)] -pub struct User { - #[serde( - default, - rename = "_id", - skip_serializing_if = "String::is_empty", - with = "hex_string_as_object_id" - )] - pub id: String, - - pub username: String, - - #[serde(default)] - pub enabled: bool, - - #[serde(default)] - pub admin: bool, - - #[serde(default)] - pub create_server_permissions: bool, - - #[serde(default)] - pub create_build_permissions: bool, - - #[serde(skip_serializing_if = "Option::is_none")] - pub avatar: Option, - - // used with auth - #[serde(default)] - pub secrets: Vec, - - #[serde(skip_serializing_if = "Option::is_none")] - pub password: Option, - - #[serde(skip_serializing_if = "Option::is_none")] - pub github_id: Option, - - #[serde(skip_serializing_if = "Option::is_none")] - pub google_id: Option, - - #[serde(default, skip_serializing_if = "String::is_empty")] - pub created_at: String, - #[serde(default)] - pub updated_at: String, -} - -#[derive( - Serialize, Deserialize, Debug, Clone, Default, PartialEq, -)] -pub struct ApiSecret { - pub name: String, - #[serde(default, skip_serializing_if = "String::is_empty")] - pub hash: String, - pub created_at: String, - pub expires: Option, -} - -// impl TryFrom -// for monitor_client::entities::user::ApiSecret -// { -// type Error = anyhow::Error; -// fn try_from(value: ApiSecret) -> Result { -// let secret = Self { -// name: value.name, -// hash: value.hash, -// created_at: unix_from_monitor_ts(&value.created_at)?, -// expires: value -// .expires -// .and_then(|exp| unix_from_monitor_ts(&exp).ok()), -// }; -// Ok(secret) -// } -// } - -impl TryFrom for monitor_client::entities::user::User { - type Error = anyhow::Error; - fn try_from(value: User) -> Result { - let config = - match (value.password, value.github_id, value.google_id) { - (Some(password), _, _) => UserConfig::Local { password }, - (None, Some(github_id), _) => UserConfig::Github { - github_id, - avatar: value.avatar.unwrap_or_default(), - }, - (None, None, Some(google_id)) => UserConfig::Google { - google_id, - avatar: value.avatar.unwrap_or_default(), - }, - _ => { - return Err(anyhow!("user is not local, github, or google")) - } - }; - let user = Self { - config, - id: value.id, - username: value.username, - enabled: value.enabled, - admin: value.admin, - create_server_permissions: value.create_server_permissions, - create_build_permissions: value.create_build_permissions, - last_update_view: Default::default(), - recents: Default::default(), - all: Default::default(), - updated_at: unix_from_monitor_ts(&value.updated_at)?, - }; - Ok(user) - } -} diff --git a/bin/migrator/src/legacy/v1_11/build.rs b/bin/migrator/src/legacy/v1_11/build.rs index 0782c8646..53928826f 100644 --- a/bin/migrator/src/legacy/v1_11/build.rs +++ b/bin/migrator/src/legacy/v1_11/build.rs @@ -1,4 +1,4 @@ -use monitor_client::entities::{ +use komodo_client::entities::{ 
build::StandardRegistryConfig, EnvironmentVar, NoData, SystemCommand, Version, I64, }; @@ -8,15 +8,15 @@ use super::resource::Resource; pub type Build = Resource; -impl From for monitor_client::entities::build::Build { +impl From for komodo_client::entities::build::Build { fn from(value: Build) -> Self { - monitor_client::entities::build::Build { + komodo_client::entities::build::Build { id: value.id, name: value.name, description: value.description, updated_at: value.updated_at, tags: value.tags, - info: monitor_client::entities::build::BuildInfo { + info: komodo_client::entities::build::BuildInfo { last_built_at: value.info.last_built_at, built_hash: None, built_message: None, @@ -99,7 +99,7 @@ pub struct BuildConfig { /// These values are visible in the final image by running `docker inspect`. #[serde( default, - deserialize_with = "monitor_client::entities::env_vars_deserializer" + deserialize_with = "komodo_client::entities::env_vars_deserializer" )] pub build_args: Vec, @@ -115,26 +115,26 @@ pub struct BuildConfig { /// ``` #[serde( default, - deserialize_with = "monitor_client::entities::env_vars_deserializer" + deserialize_with = "komodo_client::entities::env_vars_deserializer" )] pub secret_args: Vec, /// Docker labels #[serde( default, - deserialize_with = "monitor_client::entities::env_vars_deserializer" + deserialize_with = "komodo_client::entities::env_vars_deserializer" )] pub labels: Vec, } impl From - for monitor_client::entities::build::BuildConfig + for komodo_client::entities::build::BuildConfig { fn from(value: BuildConfig) -> Self { - monitor_client::entities::build::BuildConfig { + komodo_client::entities::build::BuildConfig { builder_id: value.builder_id, skip_secret_interp: value.skip_secret_interp, - version: monitor_client::entities::Version { + version: komodo_client::entities::Version { major: value.version.major, minor: value.version.minor, patch: value.version.patch, @@ -148,7 +148,7 @@ impl From branch: value.branch, commit: value.commit, git_account: value.github_account, - pre_build: monitor_client::entities::SystemCommand { + pre_build: komodo_client::entities::SystemCommand { path: value.pre_build.path, command: value.pre_build.command, }, @@ -213,17 +213,15 @@ impl Default for ImageRegistry { } impl From - for monitor_client::entities::build::ImageRegistry + for komodo_client::entities::build::ImageRegistry { fn from(value: ImageRegistry) -> Self { match value { ImageRegistry::None(_) | ImageRegistry::Custom(_) => { - monitor_client::entities::build::ImageRegistry::None( - NoData {}, - ) + komodo_client::entities::build::ImageRegistry::None(NoData {}) } ImageRegistry::DockerHub(params) => { - monitor_client::entities::build::ImageRegistry::Standard( + komodo_client::entities::build::ImageRegistry::Standard( StandardRegistryConfig { domain: String::from("docker.io"), account: params.account, @@ -232,7 +230,7 @@ impl From ) } ImageRegistry::Ghcr(params) => { - monitor_client::entities::build::ImageRegistry::Standard( + komodo_client::entities::build::ImageRegistry::Standard( StandardRegistryConfig { domain: String::from("ghcr.io"), account: params.account, @@ -241,7 +239,7 @@ impl From ) } ImageRegistry::AwsEcr(label) => { - monitor_client::entities::build::ImageRegistry::AwsEcr(label) + komodo_client::entities::build::ImageRegistry::AwsEcr(label) } } } diff --git a/bin/migrator/src/legacy/v1_11/deployment.rs b/bin/migrator/src/legacy/v1_11/deployment.rs index 8dd5723c0..8c990941a 100644 --- a/bin/migrator/src/legacy/v1_11/deployment.rs +++ 
b/bin/migrator/src/legacy/v1_11/deployment.rs @@ -1,10 +1,9 @@ -use monitor_client::entities::{ +use komodo_client::entities::{ deployment::{ conversions_deserializer, term_labels_deserializer, Conversion, - DeploymentImage, RestartMode, TerminationSignal, - TerminationSignalLabel, + DeploymentImage, RestartMode, TerminationSignalLabel, }, - env_vars_deserializer, EnvironmentVar, + env_vars_deserializer, EnvironmentVar, TerminationSignal, }; use serde::{Deserialize, Serialize}; @@ -13,10 +12,10 @@ use super::{build::ImageRegistry, resource::Resource}; pub type Deployment = Resource; impl From - for monitor_client::entities::deployment::Deployment + for komodo_client::entities::deployment::Deployment { fn from(value: Deployment) -> Self { - monitor_client::entities::deployment::Deployment { + komodo_client::entities::deployment::Deployment { id: value.id, name: value.name, description: value.description, @@ -36,7 +35,7 @@ pub struct DeploymentConfig { pub server_id: String, /// The image which the deployment deploys. - /// Can either be a user inputted image, or a Monitor build. + /// Can either be a user inputted image, or a Komodo build. #[serde(default)] pub image: DeploymentImage, @@ -136,10 +135,10 @@ fn default_network() -> String { } impl From - for monitor_client::entities::deployment::DeploymentConfig + for komodo_client::entities::deployment::DeploymentConfig { fn from(value: DeploymentConfig) -> Self { - monitor_client::entities::deployment::DeploymentConfig { + komodo_client::entities::deployment::DeploymentConfig { server_id: value.server_id, image: value.image, image_registry_account: match value.image_registry { diff --git a/bin/migrator/src/legacy/v1_6/build.rs b/bin/migrator/src/legacy/v1_6/build.rs index e0344059a..c70ff6d2a 100644 --- a/bin/migrator/src/legacy/v1_6/build.rs +++ b/bin/migrator/src/legacy/v1_6/build.rs @@ -1,4 +1,4 @@ -use monitor_client::entities::{ +use komodo_client::entities::{ build::{ImageRegistry, StandardRegistryConfig}, NoData, }; @@ -10,15 +10,15 @@ use super::{ pub type Build = Resource; -impl From for monitor_client::entities::build::Build { +impl From for komodo_client::entities::build::Build { fn from(value: Build) -> Self { - monitor_client::entities::build::Build { + komodo_client::entities::build::Build { id: value.id, name: value.name, description: value.description, updated_at: value.updated_at, tags: value.tags, - info: monitor_client::entities::build::BuildInfo { + info: komodo_client::entities::build::BuildInfo { last_built_at: value.info.last_built_at, built_hash: None, built_message: None, @@ -113,13 +113,13 @@ pub struct BuildConfig { } impl From - for monitor_client::entities::build::BuildConfig + for komodo_client::entities::build::BuildConfig { fn from(value: BuildConfig) -> Self { - monitor_client::entities::build::BuildConfig { + komodo_client::entities::build::BuildConfig { builder_id: value.builder_id, skip_secret_interp: value.skip_secret_interp, - version: monitor_client::entities::Version { + version: komodo_client::entities::Version { major: value.version.major, minor: value.version.minor, patch: value.version.patch, @@ -133,7 +133,7 @@ impl From branch: value.branch, commit: value.commit, git_account: value.github_account, - pre_build: monitor_client::entities::SystemCommand { + pre_build: komodo_client::entities::SystemCommand { path: value.pre_build.path, command: value.pre_build.command, }, diff --git a/bin/migrator/src/legacy/v1_6/deployment.rs b/bin/migrator/src/legacy/v1_6/deployment.rs index c398853f1..0c8f77f86 
100644 --- a/bin/migrator/src/legacy/v1_6/deployment.rs +++ b/bin/migrator/src/legacy/v1_6/deployment.rs @@ -1,6 +1,8 @@ -use monitor_client::entities::deployment::{ - Conversion, DeploymentImage, RestartMode, TerminationSignal, - TerminationSignalLabel, +use komodo_client::entities::{ + deployment::{ + Conversion, DeploymentImage, RestartMode, TerminationSignalLabel, + }, + TerminationSignal, }; use serde::{Deserialize, Serialize}; @@ -9,7 +11,7 @@ use super::{resource::Resource, EnvironmentVar}; pub type Deployment = Resource; impl From - for monitor_client::entities::deployment::Deployment + for komodo_client::entities::deployment::Deployment { fn from(value: Deployment) -> Self { Self { @@ -36,7 +38,7 @@ pub struct DeploymentConfig { pub send_alerts: bool, /// The image which the deployment deploys. - /// Can either be a user inputted image, or a Monitor build. + /// Can either be a user inputted image, or a Komodo build. #[serde(default)] pub image: DeploymentImage, @@ -108,7 +110,7 @@ pub struct DeploymentConfig { } impl From - for monitor_client::entities::deployment::DeploymentConfig + for komodo_client::entities::deployment::DeploymentConfig { fn from(value: DeploymentConfig) -> Self { Self { diff --git a/bin/migrator/src/legacy/v1_6/mod.rs b/bin/migrator/src/legacy/v1_6/mod.rs index 1d3eb6bf1..e1bf5a062 100644 --- a/bin/migrator/src/legacy/v1_6/mod.rs +++ b/bin/migrator/src/legacy/v1_6/mod.rs @@ -56,10 +56,10 @@ pub struct EnvironmentVar { } impl From - for monitor_client::entities::EnvironmentVar + for komodo_client::entities::EnvironmentVar { fn from(value: EnvironmentVar) -> Self { - monitor_client::entities::EnvironmentVar { + komodo_client::entities::EnvironmentVar { variable: value.variable, value: value.value, } diff --git a/bin/migrator/src/main.rs b/bin/migrator/src/main.rs index 66fa429ca..98930f0ef 100644 --- a/bin/migrator/src/main.rs +++ b/bin/migrator/src/main.rs @@ -1,12 +1,6 @@ #[macro_use] extern crate tracing; -use anyhow::Context; -use monitor_client::entities::{ - build::Build, deployment::Deployment, permission::Permission, - server::Server, update::Update, user::User, -}; -use mungos::{init::MongoBuilder, mongodb::Collection}; use serde::Deserialize; mod legacy; @@ -14,8 +8,6 @@ mod migrate; #[derive(Deserialize)] enum Migration { - #[serde(alias = "v0")] - V0, #[serde(alias = "v1.6")] V1_6, #[serde(alias = "v1.11")] @@ -27,10 +19,6 @@ struct Env { migration: Migration, target_uri: String, target_db_name: String, - /// Only needed for v0 migration - legacy_uri: Option, - /// Only needed for v0 migration - legacy_db_name: Option, } #[tokio::main] @@ -43,20 +31,6 @@ async fn main() -> anyhow::Result<()> { let env: Env = envy::from_env()?; match env.migration { - Migration::V0 => { - let legacy_db = legacy::v0::DbClient::new( - &env.legacy_uri.context( - "must provide LEGACY_URI in env for v0 migration", - )?, - &env.legacy_db_name.context( - "must provide LEGACY_DB_NAME in env for v0 migration", - )?, - ) - .await; - let target_db = - DbClient::new(&env.target_uri, &env.target_db_name).await?; - migrate::v0::migrate_all(&legacy_db, &target_db).await? 
- } Migration::V1_6 => { let db = legacy::v1_6::DbClient::new( &env.target_uri, @@ -79,30 +53,3 @@ async fn main() -> anyhow::Result<()> { Ok(()) } - -struct DbClient { - pub users: Collection, - pub updates: Collection, - pub servers: Collection, - pub deployments: Collection, - pub builds: Collection, - pub permissions: Collection, -} - -impl DbClient { - pub async fn new( - uri: &str, - db_name: &str, - ) -> anyhow::Result { - let client = MongoBuilder::default().uri(uri).build().await?; - let db = client.database(db_name); - Ok(DbClient { - users: db.collection("User"), - updates: db.collection("Update"), - servers: db.collection("Server"), - deployments: db.collection("Deployment"), - builds: db.collection("Build"), - permissions: db.collection("Permission"), - }) - } -} diff --git a/bin/migrator/src/migrate/mod.rs b/bin/migrator/src/migrate/mod.rs index a1e9bf2b4..c34fe3e94 100644 --- a/bin/migrator/src/migrate/mod.rs +++ b/bin/migrator/src/migrate/mod.rs @@ -1,3 +1,2 @@ -pub mod v0; pub mod v1_11; pub mod v1_6; diff --git a/bin/migrator/src/migrate/v0.rs b/bin/migrator/src/migrate/v0.rs deleted file mode 100644 index 5d91067de..000000000 --- a/bin/migrator/src/migrate/v0.rs +++ /dev/null @@ -1,309 +0,0 @@ -use anyhow::Context; -use monitor_client::entities::{ - build::Build, - deployment::Deployment, - permission::{Permission, UserTarget}, - server::Server, - update::{ResourceTarget, Update}, - user::User, -}; -use mungos::{ - find::find_collect, mongodb::options::InsertManyOptions, -}; - -use crate::legacy::v0; - -pub async fn migrate_all( - legacy_db: &v0::DbClient, - target_db: &crate::DbClient, -) -> anyhow::Result<()> { - migrate_users(legacy_db, target_db).await?; - migrate_servers(legacy_db, target_db).await?; - migrate_deployments(legacy_db, target_db).await?; - migrate_builds(legacy_db, target_db).await?; - migrate_updates(legacy_db, target_db).await?; - Ok(()) -} - -#[allow(unused)] -pub async fn migrate_users( - legacy_db: &v0::DbClient, - target_db: &crate::DbClient, -) -> anyhow::Result<()> { - let existing = find_collect(&target_db.users, None, None) - .await - .context("failed to get existing target users")?; - - let users = find_collect(&legacy_db.users, None, None) - .await - .context("failed to get legacy users")? 
- .into_iter() - .filter_map(|user| { - if existing.iter().any(|u| u.username == user.username) { - return None; - } - - let username = user.username.clone(); - user - .try_into() - .inspect_err(|e| { - warn!("failed to convert user {username} | {e:#}") - }) - .ok() - }) - .collect::>(); - - info!("migrating {} users...", users.len()); - - target_db - .users - .insert_many(users) - .await - .context("failed to insert users on target")?; - - info!("users have been migrated\n"); - - Ok(()) -} - -pub async fn migrate_servers( - legacy_db: &v0::DbClient, - target_db: &crate::DbClient, -) -> anyhow::Result<()> { - let existing = find_collect(&target_db.servers, None, None) - .await - .context("failed to get existing target servers")?; - - let servers = find_collect(&legacy_db.servers, None, None) - .await - .context("failed to get legacy servers")?; - - let mut new_servers = Vec::::new(); - let mut permissions = Vec::::new(); - - for server in servers { - if existing.iter().any(|s| s.name == server.name) { - continue; - } - - for (user_id, level) in &server.permissions { - let permission = Permission { - id: Default::default(), - user_target: UserTarget::User(user_id.clone()), - resource_target: ResourceTarget::Server(server.id.clone()), - level: (*level).into(), - }; - permissions.push(permission); - } - let name = server.name.clone(); - server - .try_into() - .inspect_err(|e| { - warn!("failed to convert server {name} | {e:#}") - }) - .map(|s| new_servers.push(s)) - .ok(); - } - - info!("migrating {} servers...", new_servers.len()); - - if !new_servers.is_empty() { - target_db - .servers - .insert_many(new_servers) - .with_options( - InsertManyOptions::builder().ordered(false).build(), - ) - .await - .context("failed to insert servers on target")?; - } - - if !permissions.is_empty() { - target_db - .permissions - .insert_many(permissions) - .with_options( - InsertManyOptions::builder().ordered(false).build(), - ) - .await - .context("failed to insert server permissions on target")?; - } - - info!("servers have been migrated\n"); - - Ok(()) -} - -pub async fn migrate_deployments( - legacy_db: &v0::DbClient, - target_db: &crate::DbClient, -) -> anyhow::Result<()> { - let existing = find_collect(&target_db.deployments, None, None) - .await - .context("failed to get existing target deployments")?; - - let deployments = find_collect(&legacy_db.deployments, None, None) - .await - .context("failed to get legacy deployments")?; - - let mut new_deployments = Vec::::new(); - let mut permissions = Vec::::new(); - - for deployment in deployments { - if existing.iter().any(|d| d.name == deployment.name) { - continue; - } - - for (user_id, level) in &deployment.permissions { - let permission = Permission { - id: Default::default(), - user_target: UserTarget::User(user_id.clone()), - resource_target: ResourceTarget::Deployment( - deployment.id.clone(), - ), - level: (*level).into(), - }; - permissions.push(permission); - } - let name = deployment.name.clone(); - deployment - .try_into() - .inspect_err(|e| { - warn!("failed to convert deployment {name} | {e:#}") - }) - .map(|s| new_deployments.push(s)) - .ok(); - } - - info!("migrating {} deployments...", new_deployments.len()); - - if !new_deployments.is_empty() { - target_db - .deployments - .insert_many(new_deployments) - .with_options( - InsertManyOptions::builder().ordered(false).build(), - ) - .await - .context("failed to insert deployments on target")?; - } - - if !permissions.is_empty() { - target_db - .permissions - .insert_many(permissions) - 
.with_options( - InsertManyOptions::builder().ordered(false).build(), - ) - .await - .context("failed to insert deployment permissions on target")?; - } - - info!("deployments have been migrated\n"); - - Ok(()) -} - -pub async fn migrate_builds( - legacy_db: &v0::DbClient, - target_db: &crate::DbClient, -) -> anyhow::Result<()> { - let existing = find_collect(&target_db.builds, None, None) - .await - .context("failed to get existing target builds")?; - - let builds = find_collect(&legacy_db.builds, None, None) - .await - .context("failed to get legacy builds")?; - - let mut new_builds = Vec::::new(); - let mut permissions = Vec::::new(); - - for build in builds { - if existing.iter().any(|b| b.name == build.name) { - continue; - } - - for (user_id, level) in &build.permissions { - let permission = Permission { - id: Default::default(), - user_target: UserTarget::User(user_id.clone()), - resource_target: ResourceTarget::Build(build.id.clone()), - level: (*level).into(), - }; - permissions.push(permission); - } - let name = build.name.clone(); - build - .try_into() - .inspect_err(|e| { - warn!("failed to convert build {name} | {e:#}") - }) - .map(|s| new_builds.push(s)) - .ok(); - } - - info!("migrating {} builds...", new_builds.len()); - - if !new_builds.is_empty() { - target_db - .builds - .insert_many(new_builds) - .with_options( - InsertManyOptions::builder().ordered(false).build(), - ) - .await - .inspect_err(|e| { - warn!("failed to insert builds on target | {e}") - }) - .ok(); - } - - if !permissions.is_empty() { - target_db - .permissions - .insert_many(permissions) - .with_options( - InsertManyOptions::builder().ordered(false).build(), - ) - .await - .inspect_err(|e| { - warn!("failed to insert build permissions on target | {e}") - }) - .ok(); - } - - info!("builds have been migrated\n"); - - Ok(()) -} - -#[allow(unused)] -pub async fn migrate_updates( - legacy_db: &v0::DbClient, - target_db: &crate::DbClient, -) -> anyhow::Result<()> { - let updates = find_collect(&legacy_db.updates, None, None) - .await - .context("failed to get legacy updates")? 
- .into_iter() - .map(|s| { - let context = - format!("failed to convert update | _id {}", s.id); - s.try_into().context(context) - }) - .collect::>>()?; - - info!("migrating {} updates...", updates.len()); - - target_db - .updates - .insert_many(updates) - .with_options(InsertManyOptions::builder().ordered(false).build()) - .await - .context("failed to insert updates on target")?; - - info!("updates have been migrated\n"); - - Ok(()) -} diff --git a/bin/migrator/src/migrate/v1_11.rs b/bin/migrator/src/migrate/v1_11.rs index 439ee2147..52549f05e 100644 --- a/bin/migrator/src/migrate/v1_11.rs +++ b/bin/migrator/src/migrate/v1_11.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use monitor_client::entities::{ +use komodo_client::entities::{ build::Build, deployment::Deployment, }; use mungos::{ diff --git a/bin/migrator/src/migrate/v1_6.rs b/bin/migrator/src/migrate/v1_6.rs index 29b7ceca8..e4151f814 100644 --- a/bin/migrator/src/migrate/v1_6.rs +++ b/bin/migrator/src/migrate/v1_6.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use monitor_client::entities::{ +use komodo_client::entities::{ build::Build, deployment::Deployment, }; use mungos::{ diff --git a/bin/periphery/Cargo.toml b/bin/periphery/Cargo.toml index 0d4f392f0..afab2fe87 100644 --- a/bin/periphery/Cargo.toml +++ b/bin/periphery/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "monitor_periphery" +name = "komodo_periphery" version.workspace = true edition.workspace = true authors.workspace = true @@ -15,8 +15,8 @@ path = "src/main.rs" [dependencies] # local -monitor_client = { workspace = true, features = ["docker"] } periphery_client.workspace = true +komodo_client.workspace = true formatting.workspace = true command.workspace = true logger.workspace = true diff --git a/bin/periphery/Dockerfile b/bin/periphery/Dockerfile index 12737cb32..518933c70 100644 --- a/bin/periphery/Dockerfile +++ b/bin/periphery/Dockerfile @@ -2,7 +2,7 @@ FROM rust:1.80.1-bookworm AS builder WORKDIR /builder COPY . . -RUN cargo build -p monitor_periphery --release +RUN cargo build -p komodo_periphery --release # Final Image FROM debian:bookworm-slim @@ -18,8 +18,8 @@ COPY --from=builder /builder/target/release/periphery / EXPOSE 8120 # Label for Ghcr -LABEL org.opencontainers.image.source=https://github.com/mbecker20/monitor -LABEL org.opencontainers.image.description="Monitor Periphery" +LABEL org.opencontainers.image.source=https://github.com/mbecker20/komodo +LABEL org.opencontainers.image.description="Komodo Periphery" LABEL org.opencontainers.image.licenses=GPL-3.0 # Using ENTRYPOINT allows cli args to be passed, eg using "command" in docker compose. 
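Reviewer note on the migrator changes above: every collection in the removed v0 path followed the same dedupe -> convert -> bulk-insert shape. The following is a condensed, illustrative sketch of that shape only (not part of the patch); it merges the per-collection variants from the deleted migrate/v0.rs, elides the Permission documents the server/deployment/build variants also emitted, and refers to the v0 DbClient types that this patch deletes.

use anyhow::Context;
use mungos::{find::find_collect, mongodb::options::InsertManyOptions};

// Condensed form of the removed migrate_users / migrate_servers / migrate_builds:
// skip documents already present on the target (matched by username/name),
// convert the rest via TryFrom, warn-and-drop conversion failures, then bulk
// insert with ordered(false) so one bad document does not abort the batch.
pub async fn migrate_users(
  legacy_db: &crate::legacy::v0::DbClient,
  target_db: &crate::DbClient,
) -> anyhow::Result<()> {
  let existing = find_collect(&target_db.users, None, None)
    .await
    .context("failed to get existing target users")?;
  let users = find_collect(&legacy_db.users, None, None)
    .await
    .context("failed to get legacy users")?
    .into_iter()
    .filter(|user| !existing.iter().any(|u| u.username == user.username))
    .filter_map(|user| {
      let username = user.username.clone();
      user
        .try_into()
        .inspect_err(|e| {
          tracing::warn!("failed to convert user {username} | {e:#}")
        })
        .ok()
    })
    .collect::<Vec<_>>();
  if users.is_empty() {
    return Ok(());
  }
  target_db
    .users
    .insert_many(users)
    .with_options(InsertManyOptions::builder().ordered(false).build())
    .await
    .context("failed to insert users on target")?;
  Ok(())
}

As shown in the removed code, the resource variants additionally built one Permission document per entry in the legacy permissions map before inserting; only that v0 entry point goes away here, the v1.6 and v1.11 migrations keep their own paths below.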
diff --git a/bin/periphery/src/api/build.rs b/bin/periphery/src/api/build.rs index 33f0a27f5..9f3b936c2 100644 --- a/bin/periphery/src/api/build.rs +++ b/bin/periphery/src/api/build.rs @@ -1,20 +1,18 @@ use anyhow::{anyhow, Context}; -use command::run_monitor_command; +use command::run_komodo_command; use formatting::format_serror; -use monitor_client::entities::{ +use komodo_client::entities::{ build::{Build, BuildConfig}, - get_image_name, optional_string, - server::docker_image::ImageSummary, - to_monitor_name, + get_image_name, optional_string, to_komodo_name, update::Log, EnvironmentVar, Version, }; -use periphery_client::api::build::{self, GetImageList, PruneImages}; +use periphery_client::api::build; use resolver_api::Resolve; use crate::{ config::periphery_config, - docker::{docker_client, docker_login}, + docker::docker_login, helpers::{parse_extra_args, parse_labels}, State, }; @@ -74,7 +72,7 @@ impl Resolve for State { } }; - let name = to_monitor_name(name); + let name = to_komodo_name(name); // Get paths let build_dir = @@ -109,8 +107,7 @@ impl Resolve for State { if *skip_secret_interp { let build_log = - run_monitor_command("docker build", command).await; - info!("finished building docker image"); + run_komodo_command("docker build", command).await; logs.push(build_log); } else { // Interpolate any missing secrets @@ -126,7 +123,7 @@ impl Resolve for State { replacers.extend(core_replacers); let mut build_log = - run_monitor_command("docker build", command).await; + run_komodo_command("docker build", command).await; build_log.command = svi::replace_in_string(&build_log.command, &replacers); build_log.stdout = @@ -212,30 +209,3 @@ fn cleanup_secret_env_vars(secret_args: &[EnvironmentVar]) { |EnvironmentVar { variable, .. }| std::env::remove_var(variable), ) } - -// - -impl Resolve for State { - #[instrument(name = "GetImageList", level = "debug", skip(self))] - async fn resolve( - &self, - _: GetImageList, - _: (), - ) -> anyhow::Result> { - docker_client().list_images().await - } -} - -// - -impl Resolve for State { - #[instrument(name = "PruneImages", skip(self))] - async fn resolve( - &self, - _: PruneImages, - _: (), - ) -> anyhow::Result { - let command = String::from("docker image prune -a -f"); - Ok(run_monitor_command("prune images", command).await) - } -} diff --git a/bin/periphery/src/api/compose.rs b/bin/periphery/src/api/compose.rs index cb72a26c2..de1ec61aa 100644 --- a/bin/periphery/src/api/compose.rs +++ b/bin/periphery/src/api/compose.rs @@ -1,11 +1,11 @@ use std::path::PathBuf; use anyhow::{anyhow, Context}; -use command::run_monitor_command; +use command::run_komodo_command; use formatting::format_serror; -use monitor_client::entities::{ +use komodo_client::entities::{ stack::{ComposeContents, ComposeProject}, - to_monitor_name, + to_komodo_name, update::Log, }; use periphery_client::api::compose::*; @@ -28,7 +28,7 @@ impl Resolve for State { _: (), ) -> anyhow::Result> { let docker_compose = docker_compose(); - let res = run_monitor_command( + let res = run_komodo_command( "list projects", format!("{docker_compose} ls --all --format json"), ) @@ -89,7 +89,7 @@ impl Resolve for State { _: (), ) -> anyhow::Result { let root = - periphery_config().stack_dir.join(to_monitor_name(&name)); + periphery_config().stack_dir.join(to_komodo_name(&name)); let run_directory = root.join(&run_directory); let run_directory = run_directory.canonicalize().context( "failed to validate run directory on host (canonicalize error)", @@ -149,7 +149,7 @@ impl Resolve for State { let 
command = format!( "{docker_compose} -p {project} logs {service} --tail {tail}" ); - Ok(run_monitor_command("get stack log", command).await) + Ok(run_komodo_command("get stack log", command).await) } } @@ -173,7 +173,7 @@ impl Resolve for State { let docker_compose = docker_compose(); let grep = log_grep(&terms, combinator, invert); let command = format!("{docker_compose} -p {project} logs {service} --tail 5000 2>&1 | {grep}"); - Ok(run_monitor_command("get stack log grep", command).await) + Ok(run_komodo_command("get stack log grep", command).await) } } @@ -191,13 +191,20 @@ impl Resolve for State { service, git_token, registry_token, + replacers, }: ComposeUp, _: (), ) -> anyhow::Result { let mut res = ComposeUpResponse::default(); - if let Err(e) = - compose_up(stack, service, git_token, registry_token, &mut res) - .await + if let Err(e) = compose_up( + stack, + service, + git_token, + registry_token, + &mut res, + replacers, + ) + .await { res.logs.push(Log::error( "compose up failed", @@ -218,7 +225,7 @@ impl Resolve for State { _: (), ) -> anyhow::Result { let docker_compose = docker_compose(); - let log = run_monitor_command( + let log = run_komodo_command( "compose command", format!("{docker_compose} -p {project} {command}"), ) diff --git a/bin/periphery/src/api/container.rs b/bin/periphery/src/api/container.rs index 5e3f9b3db..a737e9de2 100644 --- a/bin/periphery/src/api/container.rs +++ b/bin/periphery/src/api/container.rs @@ -1,11 +1,9 @@ use anyhow::{anyhow, Context}; -use command::run_monitor_command; +use command::run_komodo_command; use futures::future::join_all; -use monitor_client::entities::{ - deployment::{ - ContainerSummary, DeploymentState, DockerContainerStats, - }, - to_monitor_name, +use komodo_client::entities::{ + docker::container::{Container, ContainerListItem, ContainerStats}, + to_komodo_name, update::Log, }; use periphery_client::api::container::*; @@ -23,18 +21,18 @@ use crate::{ // -impl Resolve for State { +impl Resolve for State { #[instrument( - name = "GetContainerList", + name = "InspectContainer", level = "debug", skip(self) )] async fn resolve( &self, - _: GetContainerList, + InspectContainer { name }: InspectContainer, _: (), - ) -> anyhow::Result> { - docker_client().list_containers().await + ) -> anyhow::Result { + docker_client().inspect_container(&name).await } } @@ -48,7 +46,7 @@ impl Resolve for State { _: (), ) -> anyhow::Result { let command = format!("docker logs {name} --tail {tail}"); - Ok(run_monitor_command("get container log", command).await) + Ok(run_komodo_command("get container log", command).await) } } @@ -73,7 +71,7 @@ impl Resolve for State { let grep = log_grep(&terms, combinator, invert); let command = format!("docker logs {name} --tail 5000 2>&1 | {grep}"); - Ok(run_monitor_command("get container log grep", command).await) + Ok(run_komodo_command("get container log grep", command).await) } } @@ -89,7 +87,7 @@ impl Resolve for State { &self, req: GetContainerStats, _: (), - ) -> anyhow::Result { + ) -> anyhow::Result { let error = anyhow!("no stats matching {}", req.name); let mut stats = container_stats(Some(req.name)).await?; let stats = stats.pop().ok_or(error)?; @@ -109,7 +107,7 @@ impl Resolve for State { &self, _: GetContainerStatsList, _: (), - ) -> anyhow::Result> { + ) -> anyhow::Result> { container_stats(None).await } } @@ -126,7 +124,7 @@ impl Resolve for State { _: (), ) -> anyhow::Result { Ok( - run_monitor_command( + run_komodo_command( "docker start", format!("docker start {name}"), ) @@ -145,7 +143,7 @@ impl 
Resolve for State { _: (), ) -> anyhow::Result { Ok( - run_monitor_command( + run_komodo_command( "docker restart", format!("docker restart {name}"), ) @@ -164,7 +162,7 @@ impl Resolve for State { _: (), ) -> anyhow::Result { Ok( - run_monitor_command( + run_komodo_command( "docker pause", format!("docker pause {name}"), ) @@ -181,7 +179,7 @@ impl Resolve for State { _: (), ) -> anyhow::Result { Ok( - run_monitor_command( + run_komodo_command( "docker unpause", format!("docker unpause {name}"), ) @@ -200,10 +198,10 @@ impl Resolve for State { _: (), ) -> anyhow::Result { let command = stop_container_command(&name, signal, time); - let log = run_monitor_command("docker stop", command).await; + let log = run_komodo_command("docker stop", command).await; if log.stderr.contains("unknown flag: --signal") { let command = stop_container_command(&name, None, time); - let mut log = run_monitor_command("docker stop", command).await; + let mut log = run_komodo_command("docker stop", command).await; log.stderr = format!( "old docker version: unable to use --signal flag{}", if !log.stderr.is_empty() { @@ -221,38 +219,6 @@ impl Resolve for State { // -impl Resolve for State { - #[instrument(name = "StopAllContainers", skip(self))] - async fn resolve( - &self, - StopAllContainers {}: StopAllContainers, - _: (), - ) -> anyhow::Result> { - let containers = docker_client() - .list_containers() - .await - .context("failed to list all containers on host")?; - let futures = containers.iter().filter_map( - |ContainerSummary { name, state, .. }| { - // only stop running containers. if not running, early exit. - if !matches!(state, DeploymentState::Running) { - return None; - } - Some(async move { - run_monitor_command( - &format!("docker stop {name}"), - stop_container_command(name, None, None), - ) - .await - }) - }, - ); - Ok(join_all(futures).await) - } -} - -// - impl Resolve for State { #[instrument(name = "RemoveContainer", skip(self))] async fn resolve( @@ -264,12 +230,12 @@ impl Resolve for State { let command = format!("{stop_command} && docker container rm {name}"); let log = - run_monitor_command("docker stop and remove", command).await; + run_komodo_command("docker stop and remove", command).await; if log.stderr.contains("unknown flag: --signal") { let stop_command = stop_container_command(&name, None, time); let command = format!("{stop_command} && docker container rm {name}"); - let mut log = run_monitor_command("docker stop", command).await; + let mut log = run_komodo_command("docker stop", command).await; log.stderr = format!( "old docker version: unable to use --signal flag{}", if !log.stderr.is_empty() { @@ -297,9 +263,9 @@ impl Resolve for State { }: RenameContainer, _: (), ) -> anyhow::Result { - let new = to_monitor_name(&new_name); + let new = to_komodo_name(&new_name); let command = format!("docker rename {curr_name} {new}"); - Ok(run_monitor_command("docker rename", command).await) + Ok(run_komodo_command("docker rename", command).await) } } @@ -313,6 +279,133 @@ impl Resolve for State { _: (), ) -> anyhow::Result { let command = String::from("docker container prune -f"); - Ok(run_monitor_command("prune containers", command).await) + Ok(run_komodo_command("prune containers", command).await) + } +} + +// + +impl Resolve for State { + #[instrument(name = "StartAllContainers", skip(self))] + async fn resolve( + &self, + StartAllContainers {}: StartAllContainers, + _: (), + ) -> anyhow::Result> { + let containers = docker_client() + .list_containers() + .await + .context("failed to list all 
containers on host")?; + let futures = + containers.iter().map( + |ContainerListItem { name, .. }| { + let command = format!("docker start {name}"); + async move { + run_komodo_command(&command.clone(), command).await + } + }, + ); + Ok(join_all(futures).await) + } +} + +// + +impl Resolve for State { + #[instrument(name = "RestartAllContainers", skip(self))] + async fn resolve( + &self, + RestartAllContainers {}: RestartAllContainers, + _: (), + ) -> anyhow::Result> { + let containers = docker_client() + .list_containers() + .await + .context("failed to list all containers on host")?; + let futures = containers.iter().map( + |ContainerListItem { name, .. }| { + let command = format!("docker restart {name}"); + async move { + run_komodo_command(&command.clone(), command).await + } + }, + ); + Ok(join_all(futures).await) + } +} + +// + +impl Resolve for State { + #[instrument(name = "PauseAllContainers", skip(self))] + async fn resolve( + &self, + PauseAllContainers {}: PauseAllContainers, + _: (), + ) -> anyhow::Result> { + let containers = docker_client() + .list_containers() + .await + .context("failed to list all containers on host")?; + let futures = containers.iter().map( + |ContainerListItem { name, .. }| { + let command = format!("docker pause {name}"); + async move { + run_komodo_command(&command.clone(), command).await + } + }, + ); + Ok(join_all(futures).await) + } +} + +// + +impl Resolve for State { + #[instrument(name = "UnpauseAllContainers", skip(self))] + async fn resolve( + &self, + UnpauseAllContainers {}: UnpauseAllContainers, + _: (), + ) -> anyhow::Result> { + let containers = docker_client() + .list_containers() + .await + .context("failed to list all containers on host")?; + let futures = containers.iter().map( + |ContainerListItem { name, .. }| { + let command = format!("docker unpause {name}"); + async move { + run_komodo_command(&command.clone(), command).await + } + }, + ); + Ok(join_all(futures).await) + } +} + +// + +impl Resolve for State { + #[instrument(name = "StopAllContainers", skip(self))] + async fn resolve( + &self, + StopAllContainers {}: StopAllContainers, + _: (), + ) -> anyhow::Result> { + let containers = docker_client() + .list_containers() + .await + .context("failed to list all containers on host")?; + let futures = containers.iter().map( + |ContainerListItem { name, .. 
}| async move { + run_komodo_command( + &format!("docker stop {name}"), + stop_container_command(name, None, None), + ) + .await + }, + ); + Ok(join_all(futures).await) } } diff --git a/bin/periphery/src/api/deploy.rs b/bin/periphery/src/api/deploy.rs index 8da84cf39..808d2172e 100644 --- a/bin/periphery/src/api/deploy.rs +++ b/bin/periphery/src/api/deploy.rs @@ -1,13 +1,13 @@ use anyhow::Context; -use command::run_monitor_command; +use command::run_komodo_command; use formatting::format_serror; -use monitor_client::entities::{ +use komodo_client::entities::{ build::{ImageRegistry, StandardRegistryConfig}, deployment::{ extract_registry_domain, Conversion, Deployment, DeploymentConfig, DeploymentImage, RestartMode, }, - to_monitor_name, + to_komodo_name, update::Log, EnvironmentVar, NoData, }; @@ -100,7 +100,7 @@ impl Resolve for State { debug!("docker run command: {command}"); if deployment.config.skip_secret_interp { - Ok(run_monitor_command("docker run", command).await) + Ok(run_komodo_command("docker run", command).await) } else { let command = svi::interpolate_variables( &command, @@ -111,17 +111,20 @@ impl Resolve for State { .context( "failed to interpolate secrets into docker run command", ); + let (command, mut replacers) = match command { Ok(res) => res, Err(e) => { return Ok(Log::error("docker run", format!("{e:?}"))); } }; + replacers.extend(core_replacers); - let mut log = run_monitor_command("docker run", command).await; + let mut log = run_komodo_command("docker run", command).await; log.command = svi::replace_in_string(&log.command, &replacers); log.stdout = svi::replace_in_string(&log.stdout, &replacers); log.stderr = svi::replace_in_string(&log.stderr, &replacers); + Ok(log) } } @@ -146,7 +149,7 @@ fn docker_run_command( }: &Deployment, image: &str, ) -> String { - let name = to_monitor_name(name); + let name = to_komodo_name(name); let ports = parse_conversions(ports, "-p"); let volumes = volumes.to_owned(); let volumes = parse_conversions(&volumes, "-v"); diff --git a/bin/periphery/src/api/git.rs b/bin/periphery/src/api/git.rs index 419b63ed7..deaece8fc 100644 --- a/bin/periphery/src/api/git.rs +++ b/bin/periphery/src/api/git.rs @@ -1,10 +1,10 @@ use anyhow::{anyhow, Context}; -use monitor_client::entities::{ - to_monitor_name, update::Log, CloneArgs, LatestCommit, +use komodo_client::entities::{ + to_komodo_name, update::Log, CloneArgs, LatestCommit, }; use periphery_client::api::git::{ CloneRepo, DeleteRepo, GetLatestCommit, PullRepo, - RepoActionResponse, RepoActionResponseV1_13, + RepoActionResponse, }; use resolver_api::Resolve; @@ -27,7 +27,7 @@ impl Resolve for State { } impl Resolve for State { - #[instrument(name = "CloneRepo", skip(self))] + #[instrument(name = "CloneRepo", skip(self, environment))] async fn resolve( &self, CloneRepo { @@ -36,6 +36,7 @@ impl Resolve for State { environment, env_file_path, skip_secret_interp, + replacers, }: CloneRepo, _: (), ) -> anyhow::Result { @@ -64,16 +65,16 @@ impl Resolve for State { &environment, &env_file_path, (!skip_secret_interp).then_some(&periphery_config().secrets), + &replacers, ) .await .map(|(logs, commit_hash, commit_message, env_file_path)| { - RepoActionResponseV1_13 { + RepoActionResponse { logs, commit_hash, commit_message, env_file_path, } - .into() }) } } @@ -81,7 +82,7 @@ impl Resolve for State { // impl Resolve for State { - #[instrument(name = "PullRepo", skip(self))] + #[instrument(name = "PullRepo", skip(self, on_pull, environment))] async fn resolve( &self, PullRepo { @@ -92,10 +93,11 @@ impl 
Resolve for State { environment, env_file_path, skip_secret_interp, + replacers, }: PullRepo, _: (), ) -> anyhow::Result { - let name = to_monitor_name(&name); + let name = to_komodo_name(&name); let (logs, commit_hash, commit_message, env_file_path) = git::pull( &periphery_config().repo_dir.join(name), @@ -105,17 +107,15 @@ impl Resolve for State { &environment, &env_file_path, (!skip_secret_interp).then_some(&periphery_config().secrets), + &replacers, ) .await; - Ok( - RepoActionResponseV1_13 { - logs, - commit_hash, - commit_message, - env_file_path, - } - .into(), - ) + Ok(RepoActionResponse { + logs, + commit_hash, + commit_message, + env_file_path, + }) } } @@ -128,7 +128,7 @@ impl Resolve for State { DeleteRepo { name }: DeleteRepo, _: (), ) -> anyhow::Result { - let name = to_monitor_name(&name); + let name = to_komodo_name(&name); let deleted = std::fs::remove_dir_all( periphery_config().repo_dir.join(&name), ); diff --git a/bin/periphery/src/api/image.rs b/bin/periphery/src/api/image.rs new file mode 100644 index 000000000..9595da5bd --- /dev/null +++ b/bin/periphery/src/api/image.rs @@ -0,0 +1,63 @@ +use command::run_komodo_command; +use komodo_client::entities::{ + docker::image::{Image, ImageHistoryResponseItem}, + update::Log, +}; +use periphery_client::api::image::*; +use resolver_api::Resolve; + +use crate::{docker::docker_client, State}; + +// + +impl Resolve for State { + #[instrument(name = "InspectImage", level = "debug", skip(self))] + async fn resolve( + &self, + InspectImage { name }: InspectImage, + _: (), + ) -> anyhow::Result { + docker_client().inspect_image(&name).await + } +} + +// + +impl Resolve for State { + #[instrument(name = "ImageHistory", level = "debug", skip(self))] + async fn resolve( + &self, + ImageHistory { name }: ImageHistory, + _: (), + ) -> anyhow::Result> { + docker_client().image_history(&name).await + } +} + +// + +impl Resolve for State { + #[instrument(name = "DeleteImage", skip(self))] + async fn resolve( + &self, + DeleteImage { name }: DeleteImage, + _: (), + ) -> anyhow::Result { + let command = format!("docker image rm {name}"); + Ok(run_komodo_command("delete image", command).await) + } +} + +// + +impl Resolve for State { + #[instrument(name = "PruneImages", skip(self))] + async fn resolve( + &self, + _: PruneImages, + _: (), + ) -> anyhow::Result { + let command = String::from("docker image prune -a -f"); + Ok(run_komodo_command("prune images", command).await) + } +} diff --git a/bin/periphery/src/api/mod.rs b/bin/periphery/src/api/mod.rs index f22f3518b..6bddbbbec 100644 --- a/bin/periphery/src/api/mod.rs +++ b/bin/periphery/src/api/mod.rs @@ -1,12 +1,12 @@ use anyhow::Context; -use command::run_monitor_command; +use command::run_komodo_command; use futures::TryFutureExt; -use monitor_client::entities::{update::Log, SystemCommand}; +use komodo_client::entities::{update::Log, SystemCommand}; use periphery_client::api::{ - build::*, compose::*, container::*, git::*, network::*, stats::*, - GetDockerLists, GetDockerListsResponse, GetHealth, GetVersion, - GetVersionResponse, ListDockerRegistries, ListGitProviders, - ListSecrets, PruneSystem, RunCommand, + build::*, compose::*, container::*, git::*, image::*, network::*, + stats::*, volume::*, GetDockerLists, GetDockerListsResponse, + GetHealth, GetVersion, GetVersionResponse, ListDockerRegistries, + ListGitProviders, ListSecrets, PruneSystem, RunCommand, }; use resolver_api::{derive::Resolver, Resolve, ResolveToString}; use serde::{Deserialize, Serialize}; @@ -25,8 +25,10 @@ mod 
compose; mod container; mod deploy; mod git; +mod image; mod network; mod stats; +mod volume; #[derive(Serialize, Deserialize, Debug, Clone, Resolver)] #[serde(tag = "type", content = "params")] @@ -37,7 +39,7 @@ pub enum PeripheryRequest { #[to_string_resolver] GetHealth(GetHealth), - // Config + // Config (Read) #[to_string_resolver] ListGitProviders(ListGitProviders), #[to_string_resolver] @@ -45,7 +47,7 @@ pub enum PeripheryRequest { #[to_string_resolver] ListSecrets(ListSecrets), - // Stats / Info + // Stats / Info (Read) #[to_string_resolver] GetSystemInformation(GetSystemInformation), #[to_string_resolver] @@ -54,54 +56,77 @@ pub enum PeripheryRequest { GetSystemProcesses(GetSystemProcesses), GetLatestCommit(GetLatestCommit), - // All in one - GetDockerLists(GetDockerLists), - - // Docker - GetContainerList(GetContainerList), - GetContainerLog(GetContainerLog), - GetContainerLogSearch(GetContainerLogSearch), - GetContainerStats(GetContainerStats), - GetContainerStatsList(GetContainerStatsList), - GetNetworkList(GetNetworkList), - - // Actions + // Generic shell execution RunCommand(RunCommand), - // Repo + // Repo (Write) CloneRepo(CloneRepo), PullRepo(PullRepo), DeleteRepo(DeleteRepo), // Build Build(Build), - PruneImages(PruneImages), - // Container + // Compose (Read) + GetComposeContentsOnHost(GetComposeContentsOnHost), + GetComposeServiceLog(GetComposeServiceLog), + GetComposeServiceLogSearch(GetComposeServiceLogSearch), + + // Compose (Write) + ComposeUp(ComposeUp), + ComposeExecution(ComposeExecution), + + // Container (Read) + InspectContainer(InspectContainer), + GetContainerLog(GetContainerLog), + GetContainerLogSearch(GetContainerLogSearch), + GetContainerStats(GetContainerStats), + GetContainerStatsList(GetContainerStatsList), + + // Container (Write) Deploy(Deploy), StartContainer(StartContainer), RestartContainer(RestartContainer), PauseContainer(PauseContainer), UnpauseContainer(UnpauseContainer), StopContainer(StopContainer), + StartAllContainers(StartAllContainers), + RestartAllContainers(RestartAllContainers), + PauseAllContainers(PauseAllContainers), + UnpauseAllContainers(UnpauseAllContainers), StopAllContainers(StopAllContainers), RemoveContainer(RemoveContainer), RenameContainer(RenameContainer), PruneContainers(PruneContainers), - // Compose - ListComposeProjects(ListComposeProjects), - GetComposeContentsOnHost(GetComposeContentsOnHost), - GetComposeServiceLog(GetComposeServiceLog), - GetComposeServiceLogSearch(GetComposeServiceLogSearch), - ComposeUp(ComposeUp), - ComposeExecution(ComposeExecution), + // Networks (Read) + InspectNetwork(InspectNetwork), - // Networks + // Networks (Write) CreateNetwork(CreateNetwork), DeleteNetwork(DeleteNetwork), PruneNetworks(PruneNetworks), - PruneAll(PruneSystem), + + // Image (Read) + InspectImage(InspectImage), + ImageHistory(ImageHistory), + + // Image (Write) + DeleteImage(DeleteImage), + PruneImages(PruneImages), + + // Volume (Read) + InspectVolume(InspectVolume), + + // Volume (Write) + DeleteVolume(DeleteVolume), + PruneVolumes(PruneVolumes), + + // All in one (Read) + GetDockerLists(GetDockerLists), + + // All in one (Write) + PruneSystem(PruneSystem), } // @@ -185,16 +210,24 @@ impl Resolve for State { _: (), ) -> anyhow::Result { let docker = docker_client(); - let (containers, networks, images, projects) = tokio::join!( - docker.list_containers().map_err(Into::into), - docker.list_networks().map_err(Into::into), - docker.list_images().map_err(Into::into), + let containers = + 
docker.list_containers().await.map_err(Into::into); + // Should still try to retrieve other docker lists, but "in_use" will be false for images, networks, volumes + let _containers = match &containers { + Ok(containers) => containers.as_slice(), + Err(_) => &[], + }; + let (networks, images, volumes, projects) = tokio::join!( + docker.list_networks(_containers).map_err(Into::into), + docker.list_images(_containers).map_err(Into::into), + docker.list_volumes(_containers).map_err(Into::into), self.resolve(ListComposeProjects {}, ()).map_err(Into::into) ); Ok(GetDockerListsResponse { containers, networks, images, + volumes, projects, }) } @@ -215,7 +248,7 @@ impl Resolve for State { } else { format!("cd {path} && {command}") }; - run_monitor_command("run command", command).await + run_komodo_command("run command", command).await }) .await .context("failure in spawned task") @@ -229,7 +262,7 @@ impl Resolve for State { PruneSystem {}: PruneSystem, _: (), ) -> anyhow::Result { - let command = String::from("docker system prune -a -f"); - Ok(run_monitor_command("prune system", command).await) + let command = String::from("docker system prune -a -f --volumes"); + Ok(run_komodo_command("prune system", command).await) } } diff --git a/bin/periphery/src/api/network.rs b/bin/periphery/src/api/network.rs index ab0eee5b9..b07567043 100644 --- a/bin/periphery/src/api/network.rs +++ b/bin/periphery/src/api/network.rs @@ -1,24 +1,22 @@ -use command::run_monitor_command; -use monitor_client::entities::{ - server::docker_network::DockerNetwork, update::Log, -}; -use periphery_client::api::network::{ - CreateNetwork, DeleteNetwork, GetNetworkList, PruneNetworks, +use command::run_komodo_command; +use komodo_client::entities::{ + docker::network::Network, update::Log, }; +use periphery_client::api::network::*; use resolver_api::Resolve; use crate::{docker::docker_client, State}; // -impl Resolve for State { - #[instrument(name = "GetNetworkList", level = "debug", skip(self))] +impl Resolve for State { + #[instrument(name = "InspectNetwork", level = "debug", skip(self))] async fn resolve( &self, - _: GetNetworkList, + InspectNetwork { name }: InspectNetwork, _: (), - ) -> anyhow::Result> { - docker_client().list_networks().await + ) -> anyhow::Result { + docker_client().inspect_network(&name).await } } @@ -36,7 +34,7 @@ impl Resolve for State { None => String::new(), }; let command = format!("docker network create{driver} {name}"); - Ok(run_monitor_command("create network", command).await) + Ok(run_komodo_command("create network", command).await) } } @@ -50,7 +48,7 @@ impl Resolve for State { _: (), ) -> anyhow::Result { let command = format!("docker network rm {name}"); - Ok(run_monitor_command("delete network", command).await) + Ok(run_komodo_command("delete network", command).await) } } @@ -64,6 +62,6 @@ impl Resolve for State { _: (), ) -> anyhow::Result { let command = String::from("docker network prune -f"); - Ok(run_monitor_command("prune networks", command).await) + Ok(run_komodo_command("prune networks", command).await) } } diff --git a/bin/periphery/src/api/volume.rs b/bin/periphery/src/api/volume.rs new file mode 100644 index 000000000..f60a4e092 --- /dev/null +++ b/bin/periphery/src/api/volume.rs @@ -0,0 +1,47 @@ +use command::run_komodo_command; +use komodo_client::entities::{docker::volume::Volume, update::Log}; +use periphery_client::api::volume::*; +use resolver_api::Resolve; + +use crate::{docker::docker_client, State}; + +// + +impl Resolve for State { + #[instrument(name = "InspectVolume", 
level = "debug", skip(self))] + async fn resolve( + &self, + InspectVolume { name }: InspectVolume, + _: (), + ) -> anyhow::Result { + docker_client().inspect_volume(&name).await + } +} + +// + +impl Resolve for State { + #[instrument(name = "DeleteVolume", skip(self))] + async fn resolve( + &self, + DeleteVolume { name }: DeleteVolume, + _: (), + ) -> anyhow::Result { + let command = format!("docker volume rm {name}"); + Ok(run_komodo_command("delete volume", command).await) + } +} + +// + +impl Resolve for State { + #[instrument(name = "PruneVolumes", skip(self))] + async fn resolve( + &self, + _: PruneVolumes, + _: (), + ) -> anyhow::Result { + let command = String::from("docker volume prune -a -f"); + Ok(run_komodo_command("prune volumes", command).await) + } +} diff --git a/bin/periphery/src/compose.rs b/bin/periphery/src/compose.rs index a84f36868..40dd3f6af 100644 --- a/bin/periphery/src/compose.rs +++ b/bin/periphery/src/compose.rs @@ -1,20 +1,20 @@ use std::path::PathBuf; use anyhow::{anyhow, Context}; -use command::run_monitor_command; +use command::run_komodo_command; use formatting::format_serror; use git::write_environment_file; -use monitor_client::entities::{ +use komodo_client::entities::{ all_logs_success, build::{ImageRegistry, StandardRegistryConfig}, stack::{ComposeContents, Stack}, - to_monitor_name, + to_komodo_name, update::Log, CloneArgs, }; use periphery_client::api::{ compose::ComposeUpResponse, - git::{CloneRepo, RepoActionResponseV1_13}, + git::{CloneRepo, RepoActionResponse}, }; use resolver_api::Resolve; use tokio::fs; @@ -39,6 +39,7 @@ pub async fn compose_up( git_token: Option, registry_token: Option, res: &mut ComposeUpResponse, + core_replacers: Vec<(String, String)>, ) -> anyhow::Result<()> { // Write the stack to local disk. For repos, will first delete any existing folder to ensure fresh deploy. // Will also set additional fields on the reponse. @@ -49,7 +50,7 @@ pub async fn compose_up( let root = periphery_config() .stack_dir - .join(to_monitor_name(&stack.name)); + .join(to_komodo_name(&stack.name)); let run_directory = root.join(&stack.config.run_directory); let run_directory = run_directory.canonicalize().context( "failed to validate run directory on host after stack write (canonicalize error)", @@ -143,7 +144,7 @@ pub async fn compose_up( // Pull images before destroying to minimize downtime. // If this fails, do not continue. 
- let log = run_monitor_command( + let log = run_komodo_command( "compose pull", format!( "cd {run_dir} && {docker_compose} -p {project_name} -f {file_args} pull{service_arg}", @@ -168,15 +169,30 @@ pub async fn compose_up( let env_file = env_file_path .map(|path| format!(" --env-file {}", path.display())) .unwrap_or_default(); - let log = run_monitor_command( - "compose up", - format!( + let command = format!( "cd {run_dir} && {docker_compose} -p {project_name} -f {file_args}{env_file} up -d{extra_args}{service_arg}", - ), - ) - .await; - res.deployed = log.success; - res.logs.push(log); + ); + if stack.config.skip_secret_interp { + let log = run_komodo_command("compose up", command).await; + res.deployed = log.success; + res.logs.push(log); + } else { + let (command, mut replacers) = svi::interpolate_variables( + &command, + &periphery_config().secrets, + svi::Interpolator::DoubleBrackets, + true, + ).context("failed to interpolate periphery secrets into stack run command")?; + replacers.extend(core_replacers); + + let mut log = run_komodo_command("compose up", command).await; + + log.command = svi::replace_in_string(&log.command, &replacers); + log.stdout = svi::replace_in_string(&log.stdout, &replacers); + log.stderr = svi::replace_in_string(&log.stderr, &replacers); + + res.logs.push(log); + } // Unless the files are supposed to be managed on the host, // clean up here, which will also let user know immediately if there will be a problem @@ -203,7 +219,7 @@ async fn write_stack( ) -> anyhow::Result> { let root = periphery_config() .stack_dir - .join(to_monitor_name(&stack.name)); + .join(to_komodo_name(&stack.name)); let run_directory = root.join(&stack.config.run_directory); // This will remove any intermediate '/./' in the path, which is a problem for some OS. // Cannot use canonicalize yet as directory may not exist. @@ -276,7 +292,7 @@ async fn write_stack( // Ensure directory is clear going in. 
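For context on the `compose up` change above: unless `skip_secret_interp` is set, Periphery secrets are interpolated into the shell command via double-bracket placeholders, and the resolved values are then masked back out of the stored log using the collected replacer pairs (the new `core_replacers` argument feeds in extra pairs from Core). A minimal sketch of that resolve-then-mask pattern in plain Rust, using hypothetical helpers rather than the real `svi` crate API:

```rust
use std::collections::HashMap;

/// Resolve `{{name}}` placeholders to secret values, returning the resolved
/// command plus (secret value, masked label) pairs for scrubbing logs later.
fn interpolate(
  command: &str,
  secrets: &HashMap<String, String>,
) -> (String, Vec<(String, String)>) {
  let mut resolved = command.to_string();
  let mut replacers = Vec::new();
  for (name, value) in secrets {
    let placeholder = format!("{{{{{name}}}}}");
    if resolved.contains(&placeholder) {
      resolved = resolved.replace(&placeholder, value);
      replacers.push((value.clone(), format!("<{name}>")));
    }
  }
  (resolved, replacers)
}

/// Mask any secret values that ended up in command / stdout / stderr.
fn mask(text: &str, replacers: &[(String, String)]) -> String {
  replacers
    .iter()
    .fold(text.to_string(), |acc, (value, label)| acc.replace(value, label))
}

fn main() {
  let secrets =
    HashMap::from([("DB_PASS".to_string(), "hunter2".to_string())]);
  let (cmd, replacers) =
    interpolate("docker compose up -d # uses {{DB_PASS}}", &secrets);
  // The shell sees the real value...
  assert!(cmd.contains("hunter2"));
  // ...but the log that gets stored does not.
  assert_eq!(mask(&cmd, &replacers), "docker compose up -d # uses <DB_PASS>");
}
```

The key point is that the same pairs used to resolve the command are reused to scrub `command`, `stdout`, and `stderr` before the log leaves the host.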
fs::remove_dir_all(&root).await.ok(); - let RepoActionResponseV1_13 { + let RepoActionResponse { logs, commit_hash, commit_message, @@ -289,12 +305,15 @@ async fn write_stack( environment: stack.config.environment.clone(), env_file_path: stack.config.env_file_path.clone(), skip_secret_interp: stack.config.skip_secret_interp, + // repo replacer only needed for on_clone / on_pull, + // which aren't available for stacks + replacers: Default::default(), }, (), ) .await { - Ok(res) => res.into(), + Ok(res) => res, Err(e) => { let error = format_serror( &e.context("failed to clone stack repo").into(), @@ -375,7 +394,7 @@ async fn destroy_existing_containers( .as_ref() .map(|service| format!(" {service}")) .unwrap_or_default(); - let log = run_monitor_command( + let log = run_komodo_command( "destroy container", format!("{docker_compose} -p {project} down{service_arg}"), ) diff --git a/bin/periphery/src/config.rs b/bin/periphery/src/config.rs index 8042387dd..a18647788 100644 --- a/bin/periphery/src/config.rs +++ b/bin/periphery/src/config.rs @@ -1,11 +1,11 @@ use std::sync::OnceLock; use clap::Parser; -use merge_config_files::parse_config_paths; -use monitor_client::entities::{ +use komodo_client::entities::{ config::periphery::{CliArgs, Env, PeripheryConfig}, logger::{LogConfig, LogLevel}, }; +use merge_config_files::parse_config_paths; pub fn periphery_config() -> &'static PeripheryConfig { static PERIPHERY_CONFIG: OnceLock = diff --git a/bin/periphery/src/docker.rs b/bin/periphery/src/docker.rs index 0d0fe5c27..5707c70ba 100644 --- a/bin/periphery/src/docker.rs +++ b/bin/periphery/src/docker.rs @@ -1,19 +1,22 @@ use std::sync::OnceLock; use anyhow::{anyhow, Context}; -use bollard::{container::ListContainersOptions, Docker}; -use command::run_monitor_command; -use monitor_client::entities::{ +use bollard::{ + container::{InspectContainerOptions, ListContainersOptions}, + network::InspectNetworkOptions, + Docker, +}; +use command::run_komodo_command; +use komodo_client::entities::{ build::{ImageRegistry, StandardRegistryConfig}, config::core::AwsEcrConfig, - deployment::{ - ContainerSummary, DockerContainerStats, TerminationSignal, + docker::{ + container::*, image::*, network::*, volume::*, ContainerConfig, + GraphDriverData, HealthConfig, PortBinding, }, - server::{ - docker_image::ImageSummary, docker_network::DockerNetwork, - }, - to_monitor_name, + to_komodo_name, update::Log, + TerminationSignal, }; use run_command::async_run_command; @@ -38,8 +41,8 @@ impl Default for DockerClient { impl DockerClient { pub async fn list_containers( &self, - ) -> anyhow::Result> { - let res = self + ) -> anyhow::Result> { + self .docker .list_containers(Some(ListContainersOptions:: { all: true, @@ -48,61 +51,856 @@ impl DockerClient { .await? .into_iter() .map(|container| { - let info = ContainerSummary { - id: container.id.unwrap_or_default(), + Ok(ContainerListItem { name: container .names .context("no names on container")? .pop() .context("no names on container (empty vec)")? .replace('/', ""), - image: container.image.unwrap_or(String::from("unknown")), + id: container.id, + image: container.image, + image_id: container.image_id, + created: container.created, + size_rw: container.size_rw, + size_root_fs: container.size_root_fs, state: container .state .context("no container state")? 
.parse() .context("failed to parse container state")?, status: container.status, - labels: container.labels.unwrap_or_default(), network_mode: container .host_config .and_then(|config| config.network_mode), - networks: container.network_settings.and_then(|settings| { - settings - .networks - .map(|networks| networks.into_keys().collect()) - }), - }; - Ok::<_, anyhow::Error>(info) + networks: container + .network_settings + .and_then(|settings| { + settings + .networks + .map(|networks| networks.into_keys().collect()) + }) + .unwrap_or_default(), + volumes: container + .mounts + .map(|settings| { + settings + .into_iter() + .filter_map(|mount| mount.name) + .collect() + }) + .unwrap_or_default(), + }) }) - .collect::>>()?; - Ok(res) + .collect() + } + + pub async fn inspect_container( + &self, + container_name: &str, + ) -> anyhow::Result { + let container = self + .docker + .inspect_container( + container_name, + InspectContainerOptions { size: true }.into(), + ) + .await?; + Ok(Container { + id: container.id, + created: container.created, + path: container.path, + args: container + .args + .unwrap_or_default(), + state: container.state.map(|state| ContainerState { + status: state + .status + .map(|status| match status { + bollard::secret::ContainerStateStatusEnum::EMPTY => { + ContainerStateStatusEnum::Empty + } + bollard::secret::ContainerStateStatusEnum::CREATED => { + ContainerStateStatusEnum::Created + } + bollard::secret::ContainerStateStatusEnum::RUNNING => { + ContainerStateStatusEnum::Running + } + bollard::secret::ContainerStateStatusEnum::PAUSED => { + ContainerStateStatusEnum::Paused + } + bollard::secret::ContainerStateStatusEnum::RESTARTING => { + ContainerStateStatusEnum::Restarting + } + bollard::secret::ContainerStateStatusEnum::REMOVING => { + ContainerStateStatusEnum::Removing + } + bollard::secret::ContainerStateStatusEnum::EXITED => { + ContainerStateStatusEnum::Exited + } + bollard::secret::ContainerStateStatusEnum::DEAD => { + ContainerStateStatusEnum::Dead + } + }) + .unwrap_or_default(), + running: state.running, + paused: state.paused, + restarting: state.restarting, + oom_killed: state.oom_killed, + dead: state.dead, + pid: state.pid, + exit_code: state.exit_code, + error: state.error, + started_at: state.started_at, + finished_at: state.finished_at, + health: state.health.map(|health| ContainerHealth { + status: health + .status + .map(|status| match status { + bollard::secret::HealthStatusEnum::EMPTY => { + HealthStatusEnum::Empty + } + bollard::secret::HealthStatusEnum::NONE => { + HealthStatusEnum::None + } + bollard::secret::HealthStatusEnum::STARTING => { + HealthStatusEnum::Starting + } + bollard::secret::HealthStatusEnum::HEALTHY => { + HealthStatusEnum::Healthy + } + bollard::secret::HealthStatusEnum::UNHEALTHY => { + HealthStatusEnum::Unhealthy + } + }) + .unwrap_or_default(), + failing_streak: health.failing_streak, + log: health + .log + .map(|log| { + log + .into_iter() + .map(|log| HealthcheckResult { + start: log.start, + end: log.end, + exit_code: log.exit_code, + output: log.output, + }) + .collect() + }) + .unwrap_or_default(), + }), + }), + image: container.image, + resolv_conf_path: container.resolv_conf_path, + hostname_path: container.hostname_path, + hosts_path: container.hosts_path, + log_path: container.log_path, + name: container.name, + restart_count: container.restart_count, + driver: container.driver, + platform: container.platform, + mount_label: container.mount_label, + process_label: container.process_label, + app_armor_profile: 
container.app_armor_profile, + exec_ids: container.exec_ids.unwrap_or_default(), + host_config: container.host_config.map(|config| HostConfig { + cpu_shares: config.cpu_shares, + memory: config.memory, + cgroup_parent: config.cgroup_parent, + blkio_weight: config.blkio_weight, + blkio_weight_device: config + .blkio_weight_device + .unwrap_or_default() + .into_iter() + .map(|device| ResourcesBlkioWeightDevice { + path: device.path, + weight: device.weight, + }) + .collect(), + blkio_device_read_bps: config + .blkio_device_read_bps + .unwrap_or_default() + .into_iter() + .map(|bp| ThrottleDevice { + path: bp.path, + rate: bp.rate, + }) + .collect(), + blkio_device_write_bps: config + .blkio_device_write_bps + .unwrap_or_default() + .into_iter() + .map(|bp| ThrottleDevice { + path: bp.path, + rate: bp.rate, + }) + .collect(), + blkio_device_read_iops: config + .blkio_device_read_iops + .unwrap_or_default() + .into_iter() + .map(|iops| ThrottleDevice { + path: iops.path, + rate: iops.rate, + }) + .collect(), + blkio_device_write_iops: config + .blkio_device_write_iops + .unwrap_or_default() + .into_iter() + .map(|iops| ThrottleDevice { + path: iops.path, + rate: iops.rate, + }) + .collect(), + cpu_period: config.cpu_period, + cpu_quota: config.cpu_quota, + cpu_realtime_period: config.cpu_realtime_period, + cpu_realtime_runtime: config.cpu_realtime_runtime, + cpuset_cpus: config.cpuset_cpus, + cpuset_mems: config.cpuset_mems, + devices: config + .devices + .unwrap_or_default() + .into_iter() + .map(|device| DeviceMapping { + path_on_host: device.path_on_host, + path_in_container: device.path_in_container, + cgroup_permissions: device.cgroup_permissions, + }) + .collect(), + device_cgroup_rules: config + .device_cgroup_rules + .unwrap_or_default(), + device_requests: config + .device_requests + .unwrap_or_default() + .into_iter() + .map(|request| DeviceRequest { + driver: request.driver, + count: request.count, + device_ids: request.device_ids.unwrap_or_default(), + capabilities: request.capabilities.unwrap_or_default(), + options: request.options.unwrap_or_default(), + }) + .collect(), + kernel_memory_tcp: config.kernel_memory_tcp, + memory_reservation: config.memory_reservation, + memory_swap: config.memory_swap, + memory_swappiness: config.memory_swappiness, + nano_cpus: config.nano_cpus, + oom_kill_disable: config.oom_kill_disable, + init: config.init, + pids_limit: config.pids_limit, + ulimits: config + .ulimits + .unwrap_or_default() + .into_iter() + .map(|ulimit| ResourcesUlimits { + name: ulimit.name, + soft: ulimit.soft, + hard: ulimit.hard, + }) + .collect(), + cpu_count: config.cpu_count, + cpu_percent: config.cpu_percent, + io_maximum_iops: config.io_maximum_iops, + io_maximum_bandwidth: config.io_maximum_bandwidth, + binds: config.binds.unwrap_or_default(), + container_id_file: config.container_id_file, + log_config: config.log_config.map(|config| { + HostConfigLogConfig { + typ: config.typ, + config: config.config.unwrap_or_default(), + } + }), + network_mode: config.network_mode, + port_bindings: config + .port_bindings + .unwrap_or_default() + .into_iter() + .map(|(k, v)| (k, v.unwrap_or_default().into_iter().map(|v| PortBinding { + host_ip: v.host_ip, + host_port: v.host_port, + }).collect())) + .collect(), + restart_policy: config.restart_policy.map(|policy| { + RestartPolicy { + name: policy.name.map(|policy| match policy { + bollard::secret::RestartPolicyNameEnum::EMPTY => RestartPolicyNameEnum::Empty, + bollard::secret::RestartPolicyNameEnum::NO => RestartPolicyNameEnum::No, 
+ bollard::secret::RestartPolicyNameEnum::ALWAYS => RestartPolicyNameEnum::Always, + bollard::secret::RestartPolicyNameEnum::UNLESS_STOPPED => RestartPolicyNameEnum::UnlessStopped, + bollard::secret::RestartPolicyNameEnum::ON_FAILURE => RestartPolicyNameEnum::OnFailure, + }).unwrap_or_default(), + maximum_retry_count: policy.maximum_retry_count, + } + }), + auto_remove: config.auto_remove, + volume_driver: config.volume_driver, + volumes_from: config.volumes_from.unwrap_or_default(), + mounts: config.mounts + .unwrap_or_default().into_iter() + .map(|mount| ContainerMount { + target: mount.target, + source: mount.source, + typ: mount.typ.map(|typ| match typ { + bollard::secret::MountTypeEnum::EMPTY => MountTypeEnum::Empty, + bollard::secret::MountTypeEnum::BIND => MountTypeEnum::Bind, + bollard::secret::MountTypeEnum::VOLUME => MountTypeEnum::Volume, + bollard::secret::MountTypeEnum::TMPFS => MountTypeEnum::Tmpfs, + bollard::secret::MountTypeEnum::NPIPE => MountTypeEnum::Npipe, + bollard::secret::MountTypeEnum::CLUSTER => MountTypeEnum::Cluster, + }).unwrap_or_default(), + read_only: mount.read_only, + consistency: mount.consistency, + bind_options: mount.bind_options.map(|options| MountBindOptions { + propagation: options.propagation.map(|propogation| match propogation { + bollard::secret::MountBindOptionsPropagationEnum::EMPTY => MountBindOptionsPropagationEnum::Empty, + bollard::secret::MountBindOptionsPropagationEnum::PRIVATE => MountBindOptionsPropagationEnum::Private, + bollard::secret::MountBindOptionsPropagationEnum::RPRIVATE => MountBindOptionsPropagationEnum::Rprivate, + bollard::secret::MountBindOptionsPropagationEnum::SHARED => MountBindOptionsPropagationEnum::Shared, + bollard::secret::MountBindOptionsPropagationEnum::RSHARED => MountBindOptionsPropagationEnum::Rshared, + bollard::secret::MountBindOptionsPropagationEnum::SLAVE => MountBindOptionsPropagationEnum::Slave, + bollard::secret::MountBindOptionsPropagationEnum::RSLAVE => MountBindOptionsPropagationEnum::Rslave, + }).unwrap_or_default(), + non_recursive: options.non_recursive, + create_mountpoint: options.create_mountpoint, + read_only_non_recursive: options.read_only_non_recursive, + read_only_force_recursive: options.read_only_force_recursive, + }), + volume_options: mount.volume_options.map(|options| MountVolumeOptions { + no_copy: options.no_copy, + labels: options.labels.unwrap_or_default(), + driver_config: options.driver_config.map(|config| MountVolumeOptionsDriverConfig { + name: config.name, + options: config.options.unwrap_or_default(), + }), + subpath: options.subpath, + }), + tmpfs_options: mount.tmpfs_options.map(|options| MountTmpfsOptions { + size_bytes: options.size_bytes, + mode: options.mode + }), + }).collect(), + console_size: config.console_size.unwrap_or_default(), + annotations: config.annotations.unwrap_or_default(), + cap_add: config.cap_add.unwrap_or_default(), + cap_drop: config.cap_drop.unwrap_or_default(), + cgroupns_mode: config.cgroupns_mode.map(|mode| match mode { + bollard::secret::HostConfigCgroupnsModeEnum::EMPTY => HostConfigCgroupnsModeEnum::Empty, + bollard::secret::HostConfigCgroupnsModeEnum::PRIVATE => HostConfigCgroupnsModeEnum::Private, + bollard::secret::HostConfigCgroupnsModeEnum::HOST => HostConfigCgroupnsModeEnum::Host, + }), + dns: config.dns.unwrap_or_default(), + dns_options: config.dns_options.unwrap_or_default(), + dns_search: config.dns_search.unwrap_or_default(), + extra_hosts: config.extra_hosts.unwrap_or_default(), + group_add: config.group_add.unwrap_or_default(), 
+ ipc_mode: config.ipc_mode, + cgroup: config.cgroup, + links: config.links.unwrap_or_default(), + oom_score_adj: config.oom_score_adj, + pid_mode: config.pid_mode, + privileged: config.privileged, + publish_all_ports: config.publish_all_ports, + readonly_rootfs: config.readonly_rootfs, + security_opt: config.security_opt.unwrap_or_default(), + storage_opt: config.storage_opt.unwrap_or_default(), + tmpfs: config.tmpfs.unwrap_or_default(), + uts_mode: config.uts_mode, + userns_mode: config.userns_mode, + shm_size: config.shm_size, + sysctls: config.sysctls.unwrap_or_default(), + runtime: config.runtime, + isolation: config.isolation.map(|isolation| match isolation { + bollard::secret::HostConfigIsolationEnum::EMPTY => HostConfigIsolationEnum::Empty, + bollard::secret::HostConfigIsolationEnum::DEFAULT => HostConfigIsolationEnum::Default, + bollard::secret::HostConfigIsolationEnum::PROCESS => HostConfigIsolationEnum::Process, + bollard::secret::HostConfigIsolationEnum::HYPERV => HostConfigIsolationEnum::Hyperv, + }).unwrap_or_default(), + masked_paths: config.masked_paths.unwrap_or_default(), + readonly_paths: config.readonly_paths.unwrap_or_default(), + }), + graph_driver: container.graph_driver.map(|driver| GraphDriverData { + name: driver.name, + data: driver.data, + }), + size_rw: container.size_rw, + size_root_fs: container.size_root_fs, + mounts: container.mounts.unwrap_or_default().into_iter().map(|mount| MountPoint { + typ: mount.typ.map(|typ| match typ { + bollard::secret::MountPointTypeEnum::EMPTY => MountTypeEnum::Empty, + bollard::secret::MountPointTypeEnum::BIND => MountTypeEnum::Bind, + bollard::secret::MountPointTypeEnum::VOLUME => MountTypeEnum::Volume, + bollard::secret::MountPointTypeEnum::TMPFS => MountTypeEnum::Tmpfs, + bollard::secret::MountPointTypeEnum::NPIPE => MountTypeEnum::Npipe, + bollard::secret::MountPointTypeEnum::CLUSTER => MountTypeEnum::Cluster, + }).unwrap_or_default(), + name: mount.name, + source: mount.source, + destination: mount.destination, + driver: mount.driver, + mode: mount.mode, + rw: mount.rw, + propagation: mount.propagation, + }).collect(), + config: container.config.map(|config| ContainerConfig { + hostname: config.hostname, + domainname: config.domainname, + user: config.user, + attach_stdin: config.attach_stdin, + attach_stdout: config.attach_stdout, + attach_stderr: config.attach_stderr, + exposed_ports: config.exposed_ports.unwrap_or_default().into_keys().map(|k| (k, Default::default())).collect(), + tty: config.tty, + open_stdin: config.open_stdin, + stdin_once: config.stdin_once, + env: config.env.unwrap_or_default(), + cmd: config.cmd.unwrap_or_default(), + healthcheck: config.healthcheck.map(|health| HealthConfig { + test: health.test.unwrap_or_default(), + interval: health.interval, + timeout: health.timeout, + retries: health.retries, + start_period: health.start_period, + start_interval: health.start_interval, + }), + args_escaped: config.args_escaped, + image: config.image, + volumes: config.volumes.unwrap_or_default().into_keys().map(|k| (k, Default::default())).collect(), + working_dir: config.working_dir, + entrypoint: config.entrypoint.unwrap_or_default(), + network_disabled: config.network_disabled, + mac_address: config.mac_address, + on_build: config.on_build.unwrap_or_default(), + labels: config.labels.unwrap_or_default(), + stop_signal: config.stop_signal, + stop_timeout: config.stop_timeout, + shell: config.shell.unwrap_or_default(), + }), + network_settings: container.network_settings.map(|settings| NetworkSettings { + 
bridge: settings.bridge, + sandbox_id: settings.sandbox_id, + ports: settings + .ports + .unwrap_or_default() + .into_iter() + .map(|(k, v)| (k, v.unwrap_or_default().into_iter().map(|v| PortBinding { host_ip: v.host_ip, host_port: v.host_port }).collect())).collect(), + sandbox_key: settings.sandbox_key, + networks: settings.networks + .unwrap_or_default().into_iter() + .map(|(k, v)| (k, EndpointSettings { + ipam_config: v.ipam_config.map(|ipam| EndpointIpamConfig { + ipv4_address: ipam.ipv4_address, + ipv6_address: ipam.ipv6_address, + link_local_ips: ipam.link_local_ips.unwrap_or_default(), + }), + links: v.links.unwrap_or_default(), + mac_address: v.mac_address, + aliases: v.aliases.unwrap_or_default(), + network_id: v.network_id, + endpoint_id: v.endpoint_id, + gateway: v.gateway, + ip_address: v.ip_address, + ip_prefix_len: v.ip_prefix_len, + ipv6_gateway: v.ipv6_gateway, + global_ipv6_address: v.global_ipv6_address, + global_ipv6_prefix_len: v.global_ipv6_prefix_len, + driver_opts: v.driver_opts.unwrap_or_default(), + dns_names: v.dns_names.unwrap_or_default() + })).collect(), + }), + }) } pub async fn list_networks( &self, - ) -> anyhow::Result> { - let networks = self + containers: &[ContainerListItem], + ) -> anyhow::Result> { + self .docker .list_networks::(None) .await? .into_iter() - .map(|network| network.into()) - .collect(); - Ok(networks) + .map(|network| { + let (ipam_driver, ipam_subnet, ipam_gateway) = + if let Some(ipam) = network.ipam { + let (subnet, gateway) = if let Some(config) = ipam + .config + .and_then(|configs| configs.into_iter().next()) + { + (config.subnet, config.gateway) + } else { + (None, None) + }; + (ipam.driver, subnet, gateway) + } else { + (None, None, None) + }; + let in_use = match &network.name { + Some(name) => containers.iter().any(|container| { + container.networks.iter().any(|_name| name == _name) + }), + None => false, + }; + Ok(NetworkListItem { + name: network.name, + id: network.id, + created: network.created, + scope: network.scope, + driver: network.driver, + enable_ipv6: network.enable_ipv6, + ipam_driver, + ipam_subnet, + ipam_gateway, + internal: network.internal, + attachable: network.attachable, + ingress: network.ingress, + in_use, + }) + }) + .collect() + } + + pub async fn inspect_network( + &self, + network_name: &str, + ) -> anyhow::Result { + let network = self + .docker + .inspect_network::( + network_name, + InspectNetworkOptions { + verbose: true, + ..Default::default() + } + .into(), + ) + .await?; + Ok(Network { + name: network.name, + id: network.id, + created: network.created, + scope: network.scope, + driver: network.driver, + enable_ipv6: network.enable_ipv6, + ipam: network.ipam.map(|ipam| Ipam { + driver: ipam.driver, + config: ipam + .config + .unwrap_or_default() + .into_iter() + .map(|config| IpamConfig { + subnet: config.subnet, + ip_range: config.ip_range, + gateway: config.gateway, + auxiliary_addresses: config + .auxiliary_addresses + .unwrap_or_default(), + }) + .collect(), + options: ipam.options.unwrap_or_default(), + }), + internal: network.internal, + attachable: network.attachable, + ingress: network.ingress, + containers: network + .containers + .unwrap_or_default() + .into_iter() + .map(|(container_id, container)| NetworkContainer { + container_id, + name: container.name, + endpoint_id: container.endpoint_id, + mac_address: container.mac_address, + ipv4_address: container.ipv4_address, + ipv6_address: container.ipv6_address, + }) + .collect(), + options: network.options.unwrap_or_default(), + 
labels: network.labels.unwrap_or_default(), + }) } pub async fn list_images( &self, - ) -> anyhow::Result> { - let images = self + containers: &[ContainerListItem], + ) -> anyhow::Result> { + self .docker .list_images::(None) .await? .into_iter() - .map(|i| i.into()) + .map(|image| { + let in_use = containers.iter().any(|container| { + container + .image_id + .as_ref() + .map(|id| id == &image.id) + .unwrap_or_default() + }); + Ok(ImageListItem { + name: image + .repo_tags + .into_iter() + .next() + .unwrap_or_else(|| image.id.clone()), + id: image.id, + parent_id: image.parent_id, + created: image.created, + size: image.size, + in_use, + }) + }) + .collect() + } + + pub async fn inspect_image( + &self, + image_name: &str, + ) -> anyhow::Result { + let image = self.docker.inspect_image(image_name).await?; + Ok(Image { + id: image.id, + repo_tags: image.repo_tags.unwrap_or_default(), + repo_digests: image.repo_digests.unwrap_or_default(), + parent: image.parent, + comment: image.comment, + created: image.created, + docker_version: image.docker_version, + author: image.author, + architecture: image.architecture, + variant: image.variant, + os: image.os, + os_version: image.os_version, + size: image.size, + graph_driver: image.graph_driver.map(|driver| { + GraphDriverData { + name: driver.name, + data: driver.data, + } + }), + root_fs: image.root_fs.map(|fs| ImageInspectRootFs { + typ: fs.typ, + layers: fs.layers.unwrap_or_default(), + }), + metadata: image.metadata.map(|metadata| ImageInspectMetadata { + last_tag_time: metadata.last_tag_time, + }), + config: image.config.map(|config| ContainerConfig { + hostname: config.hostname, + domainname: config.domainname, + user: config.user, + attach_stdin: config.attach_stdin, + attach_stdout: config.attach_stdout, + attach_stderr: config.attach_stderr, + exposed_ports: config + .exposed_ports + .unwrap_or_default() + .into_keys() + .map(|k| (k, Default::default())) + .collect(), + tty: config.tty, + open_stdin: config.open_stdin, + stdin_once: config.stdin_once, + env: config.env.unwrap_or_default(), + cmd: config.cmd.unwrap_or_default(), + healthcheck: config.healthcheck.map(|health| HealthConfig { + test: health.test.unwrap_or_default(), + interval: health.interval, + timeout: health.timeout, + retries: health.retries, + start_period: health.start_period, + start_interval: health.start_interval, + }), + args_escaped: config.args_escaped, + image: config.image, + volumes: config + .volumes + .unwrap_or_default() + .into_keys() + .map(|k| (k, Default::default())) + .collect(), + working_dir: config.working_dir, + entrypoint: config.entrypoint.unwrap_or_default(), + network_disabled: config.network_disabled, + mac_address: config.mac_address, + on_build: config.on_build.unwrap_or_default(), + labels: config.labels.unwrap_or_default(), + stop_signal: config.stop_signal, + stop_timeout: config.stop_timeout, + shell: config.shell.unwrap_or_default(), + }), + }) + } + + pub async fn image_history( + &self, + image_name: &str, + ) -> anyhow::Result> { + let res = self + .docker + .image_history(image_name) + .await? + .into_iter() + .map(|image| ImageHistoryResponseItem { + id: image.id, + created: image.created, + created_by: image.created_by, + tags: image.tags, + size: image.size, + comment: image.comment, + }) .collect(); - Ok(images) + Ok(res) + } + + pub async fn list_volumes( + &self, + containers: &[ContainerListItem], + ) -> anyhow::Result> { + self + .docker + .list_volumes::(None) + .await? 
+ .volumes + .unwrap_or_default() + .into_iter() + .map(|volume| { + let scope = volume + .scope + .map(|scope| match scope { + bollard::secret::VolumeScopeEnum::EMPTY => { + VolumeScopeEnum::Empty + } + bollard::secret::VolumeScopeEnum::LOCAL => { + VolumeScopeEnum::Local + } + bollard::secret::VolumeScopeEnum::GLOBAL => { + VolumeScopeEnum::Global + } + }) + .unwrap_or(VolumeScopeEnum::Empty); + let in_use = containers.iter().any(|container| { + container.volumes.iter().any(|name| &volume.name == name) + }); + Ok(VolumeListItem { + name: volume.name, + driver: volume.driver, + mountpoint: volume.mountpoint, + created: volume.created_at, + size: volume.usage_data.map(|data| data.size), + scope, + in_use, + }) + }) + .collect() + } + + pub async fn inspect_volume( + &self, + volume_name: &str, + ) -> anyhow::Result { + let volume = self.docker.inspect_volume(volume_name).await?; + Ok(Volume { + name: volume.name, + driver: volume.driver, + mountpoint: volume.mountpoint, + created_at: volume.created_at, + status: volume.status.unwrap_or_default().into_keys().map(|k| (k, Default::default())).collect(), + labels: volume.labels, + scope: volume + .scope + .map(|scope| match scope { + bollard::secret::VolumeScopeEnum::EMPTY => { + VolumeScopeEnum::Empty + } + bollard::secret::VolumeScopeEnum::LOCAL => { + VolumeScopeEnum::Local + } + bollard::secret::VolumeScopeEnum::GLOBAL => { + VolumeScopeEnum::Global + } + }) + .unwrap_or_default(), + cluster_volume: volume.cluster_volume.map(|volume| { + ClusterVolume { + id: volume.id, + version: volume.version.map(|version| ObjectVersion { + index: version.index, + }), + created_at: volume.created_at, + updated_at: volume.updated_at, + spec: volume.spec.map(|spec| ClusterVolumeSpec { + group: spec.group, + access_mode: spec.access_mode.map(|mode| { + ClusterVolumeSpecAccessMode { + scope: mode.scope.map(|scope| match scope { + bollard::secret::ClusterVolumeSpecAccessModeScopeEnum::EMPTY => ClusterVolumeSpecAccessModeScopeEnum::Empty, + bollard::secret::ClusterVolumeSpecAccessModeScopeEnum::SINGLE => ClusterVolumeSpecAccessModeScopeEnum::Single, + bollard::secret::ClusterVolumeSpecAccessModeScopeEnum::MULTI => ClusterVolumeSpecAccessModeScopeEnum::Multi, + }).unwrap_or_default(), + sharing: mode.sharing.map(|sharing| match sharing { + bollard::secret::ClusterVolumeSpecAccessModeSharingEnum::EMPTY => ClusterVolumeSpecAccessModeSharingEnum::Empty, + bollard::secret::ClusterVolumeSpecAccessModeSharingEnum::NONE => ClusterVolumeSpecAccessModeSharingEnum::None, + bollard::secret::ClusterVolumeSpecAccessModeSharingEnum::READONLY => ClusterVolumeSpecAccessModeSharingEnum::Readonly, + bollard::secret::ClusterVolumeSpecAccessModeSharingEnum::ONEWRITER => ClusterVolumeSpecAccessModeSharingEnum::Onewriter, + bollard::secret::ClusterVolumeSpecAccessModeSharingEnum::ALL => ClusterVolumeSpecAccessModeSharingEnum::All, + }).unwrap_or_default(), + secrets: mode.secrets.unwrap_or_default().into_iter().map(|secret| ClusterVolumeSpecAccessModeSecrets { + key: secret.key, + secret: secret.secret, + }).collect(), + accessibility_requirements: mode + .accessibility_requirements.map(|req| ClusterVolumeSpecAccessModeAccessibilityRequirements { + requisite: req.requisite.unwrap_or_default().into_iter().map(|map| map.into_iter().map(|(k, v)| (k, v.unwrap_or_default().into_iter().map(|p| PortBinding { host_ip: p.host_ip, host_port: p.host_port }).collect())).collect()).collect(), + preferred: req.preferred.unwrap_or_default().into_iter().map(|map| map.into_iter().map(|(k, v)| (k, 
v.unwrap_or_default().into_iter().map(|p| PortBinding { host_ip: p.host_ip, host_port: p.host_port }).collect())).collect()).collect(), + }), + capacity_range: mode.capacity_range.map(|range| ClusterVolumeSpecAccessModeCapacityRange { + required_bytes: range.required_bytes, + limit_bytes: range.limit_bytes, + }), + availability: mode.availability.map(|availability| match availability { + bollard::secret::ClusterVolumeSpecAccessModeAvailabilityEnum::EMPTY => ClusterVolumeSpecAccessModeAvailabilityEnum::Empty, + bollard::secret::ClusterVolumeSpecAccessModeAvailabilityEnum::ACTIVE => ClusterVolumeSpecAccessModeAvailabilityEnum::Active, + bollard::secret::ClusterVolumeSpecAccessModeAvailabilityEnum::PAUSE => ClusterVolumeSpecAccessModeAvailabilityEnum::Pause, + bollard::secret::ClusterVolumeSpecAccessModeAvailabilityEnum::DRAIN => ClusterVolumeSpecAccessModeAvailabilityEnum::Drain, + }).unwrap_or_default(), + } + }), + }), + info: volume.info.map(|info| ClusterVolumeInfo { + capacity_bytes: info.capacity_bytes, + volume_context: info.volume_context.unwrap_or_default(), + volume_id: info.volume_id, + accessible_topology: info.accessible_topology.unwrap_or_default().into_iter().map(|map| map.into_iter().map(|(k, v)| (k, v.unwrap_or_default().into_iter().map(|p| PortBinding { host_ip: p.host_ip, host_port: p.host_port }).collect())).collect()).collect(), + }), + publish_status: volume + .publish_status + .unwrap_or_default() + .into_iter() + .map(|status| ClusterVolumePublishStatus { + node_id: status.node_id, + state: status.state.map(|state| match state { + bollard::secret::ClusterVolumePublishStatusStateEnum::EMPTY => ClusterVolumePublishStatusStateEnum::Empty, + bollard::secret::ClusterVolumePublishStatusStateEnum::PENDING_PUBLISH => ClusterVolumePublishStatusStateEnum::PendingPublish, + bollard::secret::ClusterVolumePublishStatusStateEnum::PUBLISHED => ClusterVolumePublishStatusStateEnum::Published, + bollard::secret::ClusterVolumePublishStatusStateEnum::PENDING_NODE_UNPUBLISH => ClusterVolumePublishStatusStateEnum::PendingNodeUnpublish, + bollard::secret::ClusterVolumePublishStatusStateEnum::PENDING_CONTROLLER_UNPUBLISH => ClusterVolumePublishStatusStateEnum::PendingControllerUnpublish, + }).unwrap_or_default(), + publish_context: status.publish_context.unwrap_or_default(), + }) + .collect(), + } + }), + options: volume.options, + usage_data: volume.usage_data.map(|data| VolumeUsageData { + size: data.size, + ref_count: data.ref_count, + }), + }) } } @@ -173,7 +971,7 @@ pub async fn docker_login( #[instrument] pub async fn pull_image(image: &str) -> Log { let command = format!("docker pull {image}"); - run_monitor_command("docker pull", command).await + run_komodo_command("docker pull", command).await } pub fn stop_container_command( @@ -181,7 +979,7 @@ pub fn stop_container_command( signal: Option, time: Option, ) -> String { - let container_name = to_monitor_name(container_name); + let container_name = to_komodo_name(container_name); let signal = signal .map(|signal| format!(" --signal {signal}")) .unwrap_or_default(); @@ -193,7 +991,7 @@ pub fn stop_container_command( pub async fn container_stats( container_name: Option, -) -> anyhow::Result> { +) -> anyhow::Result> { let format = "--format \"{{ json . 
}}\""; let container_name = match container_name { Some(name) => format!(" {name}"), @@ -203,7 +1001,7 @@ pub async fn container_stats( format!("docker stats{container_name} --no-stream {format}"); let output = async_run_command(&command).await; if output.success() { - let res = output + output .stdout .split('\n') .filter(|e| !e.is_empty()) @@ -212,8 +1010,7 @@ pub async fn container_stats( .context(format!("failed at parsing entry {e}"))?; Ok(parsed) }) - .collect::>>()?; - Ok(res) + .collect() } else { Err(anyhow!("{}", output.stderr.replace('\n', ""))) } diff --git a/bin/periphery/src/helpers.rs b/bin/periphery/src/helpers.rs index 7c4aa555a..317713188 100644 --- a/bin/periphery/src/helpers.rs +++ b/bin/periphery/src/helpers.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use monitor_client::entities::{EnvironmentVar, SearchCombinator}; +use komodo_client::entities::{EnvironmentVar, SearchCombinator}; use crate::config::periphery_config; diff --git a/bin/periphery/src/main.rs b/bin/periphery/src/main.rs index 01df72e31..596c6c3c3 100644 --- a/bin/periphery/src/main.rs +++ b/bin/periphery/src/main.rs @@ -32,7 +32,7 @@ async fn app() -> anyhow::Result<()> { .await .context("failed to bind tcp listener")?; - info!("monitor core started on {}", socket_addr); + info!("Komodo Periphery started on {}", socket_addr); axum::serve( listener, diff --git a/bin/periphery/src/stats.rs b/bin/periphery/src/stats.rs index c904152bb..2bb91f3d3 100644 --- a/bin/periphery/src/stats.rs +++ b/bin/periphery/src/stats.rs @@ -1,7 +1,9 @@ use std::{cmp::Ordering, sync::OnceLock}; use async_timing_util::wait_until_timelength; -use monitor_client::entities::server::stats::*; +use komodo_client::entities::stats::{ + SingleDiskUsage, SystemInformation, SystemProcess, SystemStats, +}; use sysinfo::System; use tokio::sync::RwLock; diff --git a/bin/update_logger/Cargo.toml b/bin/update_logger/Cargo.toml index 0727edb1f..10b06dc1a 100644 --- a/bin/update_logger/Cargo.toml +++ b/bin/update_logger/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true [dependencies] # local -monitor_client.workspace = true +komodo_client.workspace = true logger.workspace = true # external tokio.workspace = true diff --git a/bin/update_logger/Dockerfile b/bin/update_logger/Dockerfile index 468b18010..713360383 100644 --- a/bin/update_logger/Dockerfile +++ b/bin/update_logger/Dockerfile @@ -1,11 +1,11 @@ -FROM rust:1.71.1 as builder +FROM rust:1.80.1 as builder WORKDIR /builder COPY . . RUN cargo build -p update_logger --release -FROM gcr.io/distroless/cc +FROM gcr.io/distroless/debian-cc COPY --from=builder /builder/target/release/update_logger / diff --git a/bin/update_logger/src/main.rs b/bin/update_logger/src/main.rs index 1f99e61c1..ded55fa2a 100644 --- a/bin/update_logger/src/main.rs +++ b/bin/update_logger/src/main.rs @@ -1,16 +1,16 @@ #[macro_use] extern crate tracing; -use monitor_client::MonitorClient; +use komodo_client::KomodoClient; async fn app() -> anyhow::Result<()> { logger::init(&Default::default())?; info!("v {}", env!("CARGO_PKG_VERSION")); - let monitor = MonitorClient::new_from_env().await?; + let komodo = KomodoClient::new_from_env().await?; - let (mut rx, _) = monitor.subscribe_to_updates(1000, 5)?; + let (mut rx, _) = komodo.subscribe_to_updates(1000, 5)?; loop { let msg = rx.recv().await; diff --git a/changelog.md b/changelog.md index cded784e5..0ed88d2dc 100644 --- a/changelog.md +++ b/changelog.md @@ -1,5 +1,20 @@ # Changelog +## Komodo v1.13 (Sep 2024) +- Renamed the project to **Komodo**. 
+- Manage docker networks, volumes, and images. +- Manage Containers at the server level, without creating any Deployment. +- Add bulk Start / Restart / Pause actions for all containers on a server. +- Add **Secret** mode to Variables to hide the value in updates / logs + - Secret mode also prevents any non-admin users from retrieving the value from the API. Non admin users will still see the variable name. +- Interpolate Variables / Secrets into everything I could think of + - Deployment / Stack / Repo / Build **extra args**. + - Deployment **command**. + - Build **pre build**. + - Repo **on_clone / on_pull**. +- Added **Hetzner Singapore** datacenter for Hetzner ServerTemplates +- **Removed Google Font** - now just use system local font to avoid any third party calls. + ## Monitor v1.13 - Komodo (Aug 2024) - This is the first named release, as I think it is really big. The Komodo Dragon is the largest species of Monitor lizard. - **Deploy docker compose** with the new **Stack** resource. diff --git a/client/core/rs/Cargo.toml b/client/core/rs/Cargo.toml index 0291cb962..71b1e72cb 100644 --- a/client/core/rs/Cargo.toml +++ b/client/core/rs/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "monitor_client" -description = "Client for the monitor build and deployment system" +name = "komodo_client" +description = "Client for the Komodo build and deployment system" version.workspace = true edition.workspace = true authors.workspace = true @@ -12,7 +12,6 @@ repository.workspace = true [features] mongo = ["dep:mongo_indexed"] -docker = ["dep:bollard"] [dependencies] # mogh @@ -25,7 +24,6 @@ derive_variants.workspace = true resolver_api.workspace = true serror.workspace = true # external -bollard = { workspace = true, optional = true } tokio-tungstenite.workspace = true derive_builder.workspace = true serde_json.workspace = true diff --git a/client/core/rs/README.md b/client/core/rs/README.md index 5362c94db..51edef881 100644 --- a/client/core/rs/README.md +++ b/client/core/rs/README.md @@ -1,4 +1,4 @@ -# Monitor +# Komodo *A system to build and deploy software accross many servers* -Docs: [https://docs.rs/monitor_client/latest/monitor_client](https://docs.rs/monitor_client/latest/monitor_client) \ No newline at end of file +Docs: [https://docs.rs/komodo_client/latest/komodo_client](https://docs.rs/komodo_client/latest/komodo_client) \ No newline at end of file diff --git a/client/core/rs/src/api/auth.rs b/client/core/rs/src/api/auth.rs index 7dc99e3cd..5348ae57c 100644 --- a/client/core/rs/src/api/auth.rs +++ b/client/core/rs/src/api/auth.rs @@ -5,7 +5,7 @@ use typeshare::typeshare; use crate::entities::user::User; -pub trait MonitorAuthRequest: HasResponse {} +pub trait KomodoAuthRequest: HasResponse {} /// JSON containing an authentication token. #[typeshare] @@ -18,13 +18,13 @@ pub struct JwtResponse { // /// Non authenticated route to see the available options -/// users have to login to monitor, eg. local auth, github, google. +/// users have to login to Komodo, eg. local auth, github, google. /// Response: [GetLoginOptionsResponse]. 
#[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorAuthRequest)] +#[empty_traits(KomodoAuthRequest)] #[response(GetLoginOptionsResponse)] pub struct GetLoginOptions {} @@ -51,7 +51,7 @@ pub struct GetLoginOptionsResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorAuthRequest)] +#[empty_traits(KomodoAuthRequest)] #[response(CreateLocalUserResponse)] pub struct CreateLocalUser { /// The username for the new user. @@ -75,7 +75,7 @@ pub type CreateLocalUserResponse = JwtResponse; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorAuthRequest)] +#[empty_traits(KomodoAuthRequest)] #[response(LoginLocalUserResponse)] pub struct LoginLocalUser { /// The user's username @@ -97,7 +97,7 @@ pub type LoginLocalUserResponse = JwtResponse; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorAuthRequest)] +#[empty_traits(KomodoAuthRequest)] #[response(ExchangeForJwtResponse)] pub struct ExchangeForJwt { /// The 'exchange token' @@ -116,7 +116,7 @@ pub type ExchangeForJwtResponse = JwtResponse; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorAuthRequest)] +#[empty_traits(KomodoAuthRequest)] #[response(GetUserResponse)] pub struct GetUser {} diff --git a/client/core/rs/src/api/execute/build.rs b/client/core/rs/src/api/execute/build.rs index be5af7e0c..63b1ec217 100644 --- a/client/core/rs/src/api/execute/build.rs +++ b/client/core/rs/src/api/execute/build.rs @@ -6,7 +6,7 @@ use typeshare::typeshare; use crate::entities::update::Update; -use super::MonitorExecuteRequest; +use super::KomodoExecuteRequest; // @@ -27,7 +27,7 @@ use super::MonitorExecuteRequest; EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct RunBuild { /// Can be build id or name @@ -50,7 +50,7 @@ pub struct RunBuild { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct CancelBuild { /// Can be id or name diff --git a/client/core/rs/src/api/execute/deployment.rs b/client/core/rs/src/api/execute/deployment.rs index ee0c7673e..6c702654b 100644 --- a/client/core/rs/src/api/execute/deployment.rs +++ b/client/core/rs/src/api/execute/deployment.rs @@ -4,11 +4,9 @@ use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; use typeshare::typeshare; -use crate::entities::{ - deployment::TerminationSignal, update::Update, -}; +use crate::entities::{update::Update, TerminationSignal}; -use super::MonitorExecuteRequest; +use super::KomodoExecuteRequest; /// Deploys the container for the target deployment. Response: [Update]. 
/// @@ -28,7 +26,7 @@ use super::MonitorExecuteRequest; EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct Deploy { /// Name or id @@ -57,9 +55,9 @@ pub struct Deploy { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] -pub struct StartContainer { +pub struct StartDeployment { /// Name or id pub deployment: String, } @@ -80,9 +78,9 @@ pub struct StartContainer { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] -pub struct RestartContainer { +pub struct RestartDeployment { /// Name or id pub deployment: String, } @@ -103,9 +101,9 @@ pub struct RestartContainer { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] -pub struct PauseContainer { +pub struct PauseDeployment { /// Name or id pub deployment: String, } @@ -128,9 +126,9 @@ pub struct PauseContainer { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] -pub struct UnpauseContainer { +pub struct UnpauseDeployment { /// Name or id pub deployment: String, } @@ -151,9 +149,9 @@ pub struct UnpauseContainer { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] -pub struct StopContainer { +pub struct StopDeployment { /// Name or id pub deployment: String, /// Override the default termination signal specified in the deployment. @@ -164,7 +162,7 @@ pub struct StopContainer { // -/// Stops and removes the container for the target deployment. +/// Stops and destroys the container for the target deployment. /// Reponse: [Update]. /// /// 1. The container is stopped and removed using `docker container rm ${container_name}`. @@ -179,9 +177,9 @@ pub struct StopContainer { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] -pub struct RemoveContainer { +pub struct DestroyDeployment { /// Name or id. pub deployment: String, /// Override the default termination signal specified in the deployment. diff --git a/client/core/rs/src/api/execute/mod.rs b/client/core/rs/src/api/execute/mod.rs index 279d7552c..d2b962f18 100644 --- a/client/core/rs/src/api/execute/mod.rs +++ b/client/core/rs/src/api/execute/mod.rs @@ -25,9 +25,9 @@ pub use sync::*; use crate::entities::{NoData, I64}; -pub trait MonitorExecuteRequest: HasResponse {} +pub trait KomodoExecuteRequest: HasResponse {} -/// A wrapper for all monitor exections. +/// A wrapper for all Komodo exections. 
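The deployment-level executions above are renamed from the old container-centric names (`StartContainer` becomes `StartDeployment`, `StopContainer` becomes `StopDeployment`, `RemoveContainer` becomes `DestroyDeployment`, and so on), which frees the container names for the new server-level requests later in this patch. A hedged sketch of constructing a couple of the renamed requests, assuming the `komodo_client::api::execute` re-export path implied by `mod.rs`; the surrounding HTTP envelope is out of scope here:

```rust
use komodo_client::api::execute::{RestartDeployment, StartDeployment};

fn main() {
  // Renamed from `StartContainer` / `RestartContainer`:
  // they still target a Deployment by name or id.
  let start = StartDeployment {
    deployment: "my-deployment".to_string(),
  };
  let restart = RestartDeployment {
    deployment: "my-deployment".to_string(),
  };

  println!("{}", serde_json::to_string_pretty(&start).unwrap());
  println!("{}", serde_json::to_string_pretty(&restart).unwrap());
}
```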
#[typeshare] #[derive( Debug, @@ -61,12 +61,12 @@ pub enum Execution { // DEPLOYMENT Deploy(Deploy), - StartContainer(StartContainer), - RestartContainer(RestartContainer), - PauseContainer(PauseContainer), - UnpauseContainer(UnpauseContainer), - StopContainer(StopContainer), - RemoveContainer(RemoveContainer), + StartDeployment(StartDeployment), + RestartDeployment(RestartDeployment), + PauseDeployment(PauseDeployment), + UnpauseDeployment(UnpauseDeployment), + StopDeployment(StopDeployment), + DestroyDeployment(DestroyDeployment), // REPO CloneRepo(CloneRepo), @@ -74,12 +74,29 @@ pub enum Execution { BuildRepo(BuildRepo), CancelRepoBuild(CancelRepoBuild), - // SERVER + // SERVER (Container) + StartContainer(StartContainer), + RestartContainer(RestartContainer), + PauseContainer(PauseContainer), + UnpauseContainer(UnpauseContainer), + StopContainer(StopContainer), + DestroyContainer(DestroyContainer), + StartAllContainers(StartAllContainers), + RestartAllContainers(RestartAllContainers), + PauseAllContainers(PauseAllContainers), + UnpauseAllContainers(UnpauseAllContainers), StopAllContainers(StopAllContainers), - PruneNetworks(PruneNetworks), - PruneImages(PruneImages), PruneContainers(PruneContainers), + // SERVER (Prune) + DeleteNetwork(DeleteNetwork), + PruneNetworks(PruneNetworks), + DeleteImage(DeleteImage), + PruneImages(PruneImages), + DeleteVolume(DeleteVolume), + PruneVolumes(PruneVolumes), + PruneSystem(PruneSystem), + // SYNC RunSync(RunSync), diff --git a/client/core/rs/src/api/execute/procedure.rs b/client/core/rs/src/api/execute/procedure.rs index 9c04caa55..76144ef9c 100644 --- a/client/core/rs/src/api/execute/procedure.rs +++ b/client/core/rs/src/api/execute/procedure.rs @@ -6,7 +6,7 @@ use typeshare::typeshare; use crate::entities::update::Update; -use super::MonitorExecuteRequest; +use super::KomodoExecuteRequest; /// Runs the target procedure. 
Response: [Update] #[typeshare] @@ -20,7 +20,7 @@ use super::MonitorExecuteRequest; EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct RunProcedure { /// Id or name diff --git a/client/core/rs/src/api/execute/repo.rs b/client/core/rs/src/api/execute/repo.rs index cae18592e..caeaacd1c 100644 --- a/client/core/rs/src/api/execute/repo.rs +++ b/client/core/rs/src/api/execute/repo.rs @@ -6,7 +6,7 @@ use typeshare::typeshare; use crate::entities::update::Update; -use super::MonitorExecuteRequest; +use super::KomodoExecuteRequest; // @@ -30,7 +30,7 @@ use super::MonitorExecuteRequest; EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct CloneRepo { /// Id or name @@ -56,7 +56,7 @@ pub struct CloneRepo { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct PullRepo { /// Id or name @@ -86,7 +86,7 @@ pub struct PullRepo { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct BuildRepo { /// Id or name @@ -109,7 +109,7 @@ pub struct BuildRepo { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct CancelRepoBuild { /// Can be id or name diff --git a/client/core/rs/src/api/execute/server.rs b/client/core/rs/src/api/execute/server.rs index 62f7e4536..a60bec5f4 100644 --- a/client/core/rs/src/api/execute/server.rs +++ b/client/core/rs/src/api/execute/server.rs @@ -4,9 +4,256 @@ use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; use typeshare::typeshare; -use crate::entities::update::Update; +use crate::entities::{update::Update, TerminationSignal}; -use super::MonitorExecuteRequest; +use super::KomodoExecuteRequest; + +// ============= +// = CONTAINER = +// ============= + +/// Starts the container on the target server. Response: [Update] +/// +/// 1. Runs `docker start ${container_name}`. +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct StartContainer { + /// Name or id + pub server: String, + /// The container name + pub container: String, +} + +// + +/// Restarts the container on the target server. Response: [Update] +/// +/// 1. Runs `docker restart ${container_name}`. +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct RestartContainer { + /// Name or id + pub server: String, + /// The container name + pub container: String, +} + +// + +/// Pauses the container on the target server. Response: [Update] +/// +/// 1. Runs `docker pause ${container_name}`. +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct PauseContainer { + /// Name or id + pub server: String, + /// The container name + pub container: String, +} + +// + +/// Unpauses the container on the target server. Response: [Update] +/// +/// 1. Runs `docker unpause ${container_name}`. +/// +/// Note. This is the only way to restart a paused container. 
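These new requests (continuing below with `StopContainer`, `DestroyContainer`, and the `*AllContainers` bulk actions) give server-level container control: they target a Server plus a raw container name, with no Deployment resource in between. Because the `Execution` wrapper enum gains matching variants, they can be used anywhere an `Execution` is accepted. A sketch, with the module path again assumed from the crate layout:

```rust
use komodo_client::api::execute::{Execution, RestartContainer};

fn main() {
  // Server-level container control: the Server (name or id) plus the raw
  // container name, no Deployment required.
  let execution = Execution::RestartContainer(RestartContainer {
    server: "my-server".to_string(),
    container: "my-container".to_string(),
  });

  println!("{execution:?}");
}
```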
+#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct UnpauseContainer { + /// Name or id + pub server: String, + /// The container name + pub container: String, +} + +// + +/// Stops the container on the target server. Response: [Update] +/// +/// 1. Runs `docker stop ${container_name}`. +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct StopContainer { + /// Name or id + pub server: String, + /// The container name + pub container: String, + /// Override the default termination signal. + pub signal: Option, + /// Override the default termination max time. + pub time: Option, +} + +// + +/// Stops and destroys the container on the target server. +/// Reponse: [Update]. +/// +/// 1. The container is stopped and removed using `docker container rm ${container_name}`. +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct DestroyContainer { + /// Name or id + pub server: String, + /// The container name + pub container: String, + /// Override the default termination signal. + pub signal: Option, + /// Override the default termination max time. + pub time: Option, +} + +// + +/// Starts all containers on the target server. Response: [Update] +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct StartAllContainers { + /// Name or id + pub server: String, +} + +// + +/// Restarts all containers on the target server. Response: [Update] +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct RestartAllContainers { + /// Name or id + pub server: String, +} + +// + +/// Pauses all containers on the target server. Response: [Update] +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct PauseAllContainers { + /// Name or id + pub server: String, +} + +// + +/// Unpauses all containers on the target server. Response: [Update] +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct UnpauseAllContainers { + /// Name or id + pub server: String, +} // @@ -22,7 +269,7 @@ use super::MonitorExecuteRequest; EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct StopAllContainers { /// Name or id @@ -31,52 +278,6 @@ pub struct StopAllContainers { // -/// Prunes the docker networks on the target server. Response: [Update]. -/// -/// 1. Runs `docker network prune -f`. 
-#[typeshare] -#[derive( - Serialize, - Deserialize, - Debug, - Clone, - PartialEq, - Request, - EmptyTraits, - Parser, -)] -#[empty_traits(MonitorExecuteRequest)] -#[response(Update)] -pub struct PruneNetworks { - /// Id or name - pub server: String, -} - -// - -/// Prunes the docker images on the target server. Response: [Update]. -/// -/// 1. Runs `docker image prune -a -f`. -#[typeshare] -#[derive( - Serialize, - Deserialize, - Debug, - Clone, - PartialEq, - Request, - EmptyTraits, - Parser, -)] -#[empty_traits(MonitorExecuteRequest)] -#[response(Update)] -pub struct PruneImages { - /// Id or name - pub server: String, -} - -// - /// Prunes the docker containers on the target server. Response: [Update]. /// /// 1. Runs `docker container prune -f`. @@ -91,9 +292,171 @@ pub struct PruneImages { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct PruneContainers { /// Id or name pub server: String, } + +// ============================ +// = NETWORK / IMAGE / VOLUME = +// ============================ + +/// Delete a docker network. +/// Response: [Update] +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct DeleteNetwork { + /// Id or name. + pub server: String, + /// The name of the network to delete. + pub name: String, +} + +// + +/// Prunes the docker networks on the target server. Response: [Update]. +/// +/// 1. Runs `docker network prune -f`. +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct PruneNetworks { + /// Id or name + pub server: String, +} + +// + +/// Delete a docker image. +/// Response: [Update] +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct DeleteImage { + /// Id or name. + pub server: String, + /// The name of the image to delete. + pub name: String, +} + +// + +/// Prunes the docker images on the target server. Response: [Update]. +/// +/// 1. Runs `docker image prune -a -f`. +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct PruneImages { + /// Id or name + pub server: String, +} + +// + +/// Delete a docker volume. +/// Response: [Update] +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct DeleteVolume { + /// Id or name. + pub server: String, + /// The name of the volume to delete. + pub name: String, +} + +/// Prunes the docker volumes on the target server. Response: [Update]. +/// +/// 1. Runs `docker volume prune -a -f`. +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct PruneVolumes { + /// Id or name + pub server: String, +} + +/// Prunes the docker system on the target server, including volumes. Response: [Update]. +/// +/// 1. Runs `docker system prune -a -f --volumes`. 
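Each of the new network / image / volume requests maps to a fixed docker CLI invocation on the Periphery agent, exactly as the doc comments spell out. A small sketch of the two volume requests, with comments noting the command each one ultimately runs (module path assumed as before):

```rust
use komodo_client::api::execute::{DeleteVolume, PruneVolumes};

fn main() {
  // Periphery runs `docker volume rm {name}` for this request.
  let delete = DeleteVolume {
    server: "my-server".to_string(),
    name: "my-volume".to_string(),
  };

  // Periphery runs `docker volume prune -a -f` for this request.
  let prune = PruneVolumes {
    server: "my-server".to_string(),
  };

  println!("{}", serde_json::to_string(&delete).unwrap());
  println!("{}", serde_json::to_string(&prune).unwrap());
}
```

Note the `-a` flag on the prune: on recent Docker versions this also removes unused named volumes, not just anonymous ones, so it is a fairly aggressive default.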
+#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(KomodoExecuteRequest)] +#[response(Update)] +pub struct PruneSystem { + /// Id or name + pub server: String, +} diff --git a/client/core/rs/src/api/execute/server_template.rs b/client/core/rs/src/api/execute/server_template.rs index 2e6000daa..7971edecd 100644 --- a/client/core/rs/src/api/execute/server_template.rs +++ b/client/core/rs/src/api/execute/server_template.rs @@ -5,7 +5,7 @@ use typeshare::typeshare; use crate::entities::update::Update; -use super::MonitorExecuteRequest; +use super::KomodoExecuteRequest; /// Launch an EC2 instance with the specified config. /// Response: [Update]. @@ -13,7 +13,7 @@ use super::MonitorExecuteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct LaunchServer { /// The name of the created server. diff --git a/client/core/rs/src/api/execute/stack.rs b/client/core/rs/src/api/execute/stack.rs index 80e70e562..517a88037 100644 --- a/client/core/rs/src/api/execute/stack.rs +++ b/client/core/rs/src/api/execute/stack.rs @@ -6,7 +6,7 @@ use typeshare::typeshare; use crate::entities::update::Update; -use super::MonitorExecuteRequest; +use super::KomodoExecuteRequest; /// Deploys the target stack. `docker compose up`. Response: [Update] /// @@ -22,7 +22,7 @@ use super::MonitorExecuteRequest; EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct DeployStack { /// Id or name @@ -46,7 +46,7 @@ pub struct DeployStack { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct StartStack { /// Id or name @@ -69,7 +69,7 @@ pub struct StartStack { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct RestartStack { /// Id or name @@ -92,7 +92,7 @@ pub struct RestartStack { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct PauseStack { /// Id or name @@ -117,7 +117,7 @@ pub struct PauseStack { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct UnpauseStack { /// Id or name @@ -140,7 +140,7 @@ pub struct UnpauseStack { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct StopStack { /// Id or name @@ -165,7 +165,7 @@ pub struct StopStack { EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct DestroyStack { /// Id or name diff --git a/client/core/rs/src/api/execute/sync.rs b/client/core/rs/src/api/execute/sync.rs index 83599a714..db00e1c1d 100644 --- a/client/core/rs/src/api/execute/sync.rs +++ b/client/core/rs/src/api/execute/sync.rs @@ -6,7 +6,7 @@ use typeshare::typeshare; use crate::entities::update::Update; -use super::MonitorExecuteRequest; +use super::KomodoExecuteRequest; /// Runs the target resource sync. 
Response: [Update] #[typeshare] @@ -20,7 +20,7 @@ use super::MonitorExecuteRequest; EmptyTraits, Parser, )] -#[empty_traits(MonitorExecuteRequest)] +#[empty_traits(KomodoExecuteRequest)] #[response(Update)] pub struct RunSync { /// Id or name diff --git a/client/core/rs/src/api/mod.rs b/client/core/rs/src/api/mod.rs index 9d1caa0b9..6fd695cb6 100644 --- a/client/core/rs/src/api/mod.rs +++ b/client/core/rs/src/api/mod.rs @@ -1,6 +1,6 @@ -//! # Monitor core API +//! # Komodo core API //! -//! Monitor core exposes an HTTP api using standard JSON serialization. +//! Komodo core exposes an HTTP api using standard JSON serialization. //! //! All calls share some common HTTP params: //! - Method: `POST` @@ -33,8 +33,8 @@ //! //! - [auth]: Requests relating to logging in / obtaining authentication tokens. //! - [user]: User self-management actions (manage api keys, etc.) -//! - [read]: Read only requests which retrieve data from Monitor. -//! - [execute]: Run actions on monitor resources, eg [execute::RunBuild]. +//! - [read]: Read only requests which retrieve data from Komodo. +//! - [execute]: Run actions on Komodo resources, eg [execute::RunBuild]. //! - [mod@write]: Requests which alter data, like create / update / delete resources. //! //! ## Errors diff --git a/client/core/rs/src/api/read/alert.rs b/client/core/rs/src/api/read/alert.rs index 01988cfe6..4f27ecad3 100644 --- a/client/core/rs/src/api/read/alert.rs +++ b/client/core/rs/src/api/read/alert.rs @@ -5,7 +5,7 @@ use typeshare::typeshare; use crate::entities::{alert::Alert, MongoDocument, I64, U64}; -use super::MonitorReadRequest; +use super::KomodoReadRequest; /// Get a paginated list of alerts sorted by timestamp descending. /// Response: [ListAlertsResponse]. @@ -13,7 +13,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListAlertsResponse)] pub struct ListAlerts { /// Pass a custom mongo query to filter the alerts. @@ -64,7 +64,7 @@ pub struct ListAlertsResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetAlertResponse)] pub struct GetAlert { pub id: String, diff --git a/client/core/rs/src/api/read/alerter.rs b/client/core/rs/src/api/read/alerter.rs index 75fedc3a1..ad45f803b 100644 --- a/client/core/rs/src/api/read/alerter.rs +++ b/client/core/rs/src/api/read/alerter.rs @@ -7,7 +7,7 @@ use crate::entities::alerter::{ Alerter, AlerterListItem, AlerterQuery, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; // @@ -16,7 +16,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetAlerterResponse)] pub struct GetAlerter { /// Id or name @@ -34,7 +34,7 @@ pub type GetAlerterResponse = Alerter; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListAlertersResponse)] pub struct ListAlerters { /// Structured query to filter alerters. 
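The `api/mod.rs` docs updated above describe the Komodo core API as plain `POST` requests with JSON bodies. A minimal sketch of what a read call such as `ListAlerters` could look like from Rust, using the `reqwest` and `serde_json` crates; the `/read` path, the `{ "type", "params" }` envelope, and the `X-Api-Key` / `X-Api-Secret` headers are assumptions for illustration, not taken from this diff:

```rust
use serde_json::json;

fn main() -> Result<(), Box<dyn std::error::Error>> {
  let client = reqwest::blocking::Client::new();
  // Send a read request and print the JSON response.
  let alerters: serde_json::Value = client
    .post("https://komodo.example.com/read")
    .header("X-Api-Key", "<api key>")
    .header("X-Api-Secret", "<api secret>")
    .json(&json!({ "type": "ListAlerters", "params": {} }))
    .send()?
    .error_for_status()?
    .json()?;
  println!("{alerters:#}");
  Ok(())
}
```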
@@ -50,7 +50,7 @@ pub type ListAlertersResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListFullAlertersResponse)] pub struct ListFullAlerters { /// Structured query to filter alerters. @@ -69,7 +69,7 @@ pub type ListFullAlertersResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetAlertersSummaryResponse)] pub struct GetAlertersSummary {} diff --git a/client/core/rs/src/api/read/build.rs b/client/core/rs/src/api/read/build.rs index 033fcd3a0..76f3a4783 100644 --- a/client/core/rs/src/api/read/build.rs +++ b/client/core/rs/src/api/read/build.rs @@ -10,7 +10,7 @@ use crate::entities::{ Version, I64, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; // @@ -19,7 +19,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetBuildResponse)] pub struct GetBuild { /// Id or name @@ -37,7 +37,7 @@ pub type GetBuildResponse = Build; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListBuildsResponse)] pub struct ListBuilds { /// optional structured query to filter builds. @@ -55,7 +55,7 @@ pub type ListBuildsResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListFullBuildsResponse)] pub struct ListFullBuilds { /// optional structured query to filter builds. @@ -73,7 +73,7 @@ pub type ListFullBuildsResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetBuildActionStateResponse)] pub struct GetBuildActionState { /// Id or name @@ -92,7 +92,7 @@ pub type GetBuildActionStateResponse = BuildActionState; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetBuildsSummaryResponse)] pub struct GetBuildsSummary {} @@ -100,7 +100,7 @@ pub struct GetBuildsSummary {} #[typeshare] #[derive(Serialize, Deserialize, Default, Debug, Clone)] pub struct GetBuildsSummaryResponse { - /// The total number of builds in monitor. + /// The total number of builds in Komodo. pub total: u32, /// The number of builds with Ok state. pub ok: u32, @@ -123,7 +123,7 @@ pub struct GetBuildsSummaryResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetBuildMonthlyStatsResponse)] pub struct GetBuildMonthlyStats { /// Query for older data by incrementing the page. 
@@ -184,7 +184,7 @@ impl GetBuildMonthlyStatsResponse { #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListBuildVersionsResponse)] pub struct ListBuildVersions { /// Id or name @@ -218,7 +218,7 @@ pub struct BuildVersionResponseItem { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListCommonBuildExtraArgsResponse)] pub struct ListCommonBuildExtraArgs { /// optional structured query to filter builds. @@ -236,7 +236,7 @@ pub type ListCommonBuildExtraArgsResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetBuildWebhookEnabledResponse)] pub struct GetBuildWebhookEnabled { /// Id or name diff --git a/client/core/rs/src/api/read/builder.rs b/client/core/rs/src/api/read/builder.rs index 0806a78b8..0dd7e09e7 100644 --- a/client/core/rs/src/api/read/builder.rs +++ b/client/core/rs/src/api/read/builder.rs @@ -7,7 +7,7 @@ use crate::entities::builder::{ Builder, BuilderListItem, BuilderQuery, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; // @@ -16,7 +16,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetBuilderResponse)] pub struct GetBuilder { /// Id or name @@ -34,7 +34,7 @@ pub type GetBuilderResponse = Builder; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListBuildersResponse)] pub struct ListBuilders { #[serde(default)] @@ -51,7 +51,7 @@ pub type ListBuildersResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListFullBuildersResponse)] pub struct ListFullBuilders { #[serde(default)] @@ -69,7 +69,7 @@ pub type ListFullBuildersResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetBuildersSummaryResponse)] pub struct GetBuildersSummary {} diff --git a/client/core/rs/src/api/read/deployment.rs b/client/core/rs/src/api/read/deployment.rs index f287d1ac3..d511a704d 100644 --- a/client/core/rs/src/api/read/deployment.rs +++ b/client/core/rs/src/api/read/deployment.rs @@ -5,15 +5,15 @@ use typeshare::typeshare; use crate::entities::{ deployment::{ - ContainerSummary, Deployment, DeploymentActionState, - DeploymentListItem, DeploymentQuery, DeploymentState, - DockerContainerStats, + Deployment, DeploymentActionState, DeploymentListItem, + DeploymentQuery, DeploymentState, }, + docker::container::{ContainerListItem, ContainerStats}, update::Log, SearchCombinator, I64, U64, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; // @@ -22,7 +22,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetDeploymentResponse)] pub struct GetDeployment { /// Id or name @@ -41,7 +41,7 @@ pub type GetDeploymentResponse = Deployment; #[derive( Serialize, Deserialize, Debug, Clone, Default, 
Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListDeploymentsResponse)] pub struct ListDeployments { /// optional structured query to filter deployments. @@ -60,7 +60,7 @@ pub type ListDeploymentsResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListFullDeploymentsResponse)] pub struct ListFullDeployments { /// optional structured query to filter deployments. @@ -83,7 +83,7 @@ pub type ListFullDeploymentsResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetDeploymentContainerResponse)] pub struct GetDeploymentContainer { /// Id or name @@ -96,7 +96,7 @@ pub struct GetDeploymentContainer { #[derive(Serialize, Deserialize, Debug, Clone)] pub struct GetDeploymentContainerResponse { pub state: DeploymentState, - pub container: Option, + pub container: Option, } // @@ -109,9 +109,9 @@ pub struct GetDeploymentContainerResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] -#[response(GetLogResponse)] -pub struct GetLog { +#[empty_traits(KomodoReadRequest)] +#[response(GetDeploymentLogResponse)] +pub struct GetDeploymentLog { /// Id or name #[serde(alias = "id", alias = "name")] pub deployment: String, @@ -127,7 +127,7 @@ fn default_tail() -> u64 { } #[typeshare] -pub type GetLogResponse = Log; +pub type GetDeploymentLogResponse = Log; // @@ -139,9 +139,9 @@ pub type GetLogResponse = Log; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] -#[response(SearchLogResponse)] -pub struct SearchLog { +#[empty_traits(KomodoReadRequest)] +#[response(SearchDeploymentLogResponse)] +pub struct SearchDeploymentLog { /// Id or name #[serde(alias = "id", alias = "name")] pub deployment: String, @@ -159,7 +159,7 @@ pub struct SearchLog { } #[typeshare] -pub type SearchLogResponse = Log; +pub type SearchDeploymentLogResponse = Log; // @@ -171,7 +171,7 @@ pub type SearchLogResponse = Log; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetDeploymentStatsResponse)] pub struct GetDeploymentStats { /// Id or name @@ -180,7 +180,7 @@ pub struct GetDeploymentStats { } #[typeshare] -pub type GetDeploymentStatsResponse = DockerContainerStats; +pub type GetDeploymentStatsResponse = ContainerStats; // @@ -190,7 +190,7 @@ pub type GetDeploymentStatsResponse = DockerContainerStats; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(DeploymentActionState)] pub struct GetDeploymentActionState { /// Id or name @@ -209,7 +209,7 @@ pub type GetDeploymentActionStateResponse = DeploymentActionState; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetDeploymentsSummaryResponse)] pub struct GetDeploymentsSummary {} @@ -232,7 +232,7 @@ pub struct GetDeploymentsSummaryResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListCommonDeploymentExtraArgsResponse)] pub struct 
ListCommonDeploymentExtraArgs { /// optional structured query to filter deployments. diff --git a/client/core/rs/src/api/read/mod.rs b/client/core/rs/src/api/read/mod.rs index e45b1de57..f403a165b 100644 --- a/client/core/rs/src/api/read/mod.rs +++ b/client/core/rs/src/api/read/mod.rs @@ -47,21 +47,20 @@ pub use variable::*; use crate::entities::{ config::{DockerRegistry, GitProvider}, - update::ResourceTarget, - Timelength, + ResourceTarget, Timelength, }; -pub trait MonitorReadRequest: HasResponse {} +pub trait KomodoReadRequest: HasResponse {} // -/// Get the version of the core api. +/// Get the version of the Komodo Core api. /// Response: [GetVersionResponse]. #[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetVersionResponse)] pub struct GetVersion {} @@ -81,7 +80,7 @@ pub struct GetVersionResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetCoreInfoResponse)] pub struct GetCoreInfo {} @@ -116,7 +115,7 @@ pub struct GetCoreInfoResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListGitProvidersFromConfigResponse)] pub struct ListGitProvidersFromConfig { /// Accepts an optional Server or Builder target to expand the core list with @@ -140,7 +139,7 @@ pub type ListGitProvidersFromConfigResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListDockerRegistriesFromConfigResponse)] pub struct ListDockerRegistriesFromConfig { /// Accepts an optional Server or Builder target to expand the core list with @@ -159,7 +158,7 @@ pub type ListDockerRegistriesFromConfigResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListAwsEcrLabelsResponse)] pub struct ListAwsEcrLabels {} @@ -174,7 +173,7 @@ pub type ListAwsEcrLabelsResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListSecretsResponse)] pub struct ListSecrets { /// Accepts an optional Server or Builder target to expand the core list with diff --git a/client/core/rs/src/api/read/permission.rs b/client/core/rs/src/api/read/permission.rs index 0e8cd5860..98cfa31cc 100644 --- a/client/core/rs/src/api/read/permission.rs +++ b/client/core/rs/src/api/read/permission.rs @@ -5,10 +5,10 @@ use typeshare::typeshare; use crate::entities::{ permission::{Permission, PermissionLevel, UserTarget}, - update::ResourceTarget, + ResourceTarget, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; /// List permissions for the calling user. /// Does not include any permissions on UserGroups they may be a part of. 
@@ -17,7 +17,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListPermissionsResponse)] pub struct ListPermissions {} @@ -33,7 +33,7 @@ pub type ListPermissionsResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetPermissionLevelResponse)] pub struct GetPermissionLevel { /// The target to get user permission on. @@ -51,7 +51,7 @@ pub type GetPermissionLevelResponse = PermissionLevel; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListUserTargetPermissionsResponse)] pub struct ListUserTargetPermissions { /// Specify either a user or a user group. diff --git a/client/core/rs/src/api/read/procedure.rs b/client/core/rs/src/api/read/procedure.rs index 76003dfba..9663cd6d7 100644 --- a/client/core/rs/src/api/read/procedure.rs +++ b/client/core/rs/src/api/read/procedure.rs @@ -7,7 +7,7 @@ use crate::entities::procedure::{ Procedure, ProcedureActionState, ProcedureListItem, ProcedureQuery, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; // @@ -16,7 +16,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetProcedureResponse)] pub struct GetProcedure { /// Id or name @@ -34,7 +34,7 @@ pub type GetProcedureResponse = Procedure; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListProceduresResponse)] pub struct ListProcedures { /// optional structured query to filter procedures. @@ -52,7 +52,7 @@ pub type ListProceduresResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListFullProceduresResponse)] pub struct ListFullProcedures { /// optional structured query to filter procedures. @@ -70,7 +70,7 @@ pub type ListFullProceduresResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetProcedureActionStateResponse)] pub struct GetProcedureActionState { /// Id or name @@ -89,7 +89,7 @@ pub type GetProcedureActionStateResponse = ProcedureActionState; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetProceduresSummaryResponse)] pub struct GetProceduresSummary {} diff --git a/client/core/rs/src/api/read/provider.rs b/client/core/rs/src/api/read/provider.rs index 748463150..8a648d0a8 100644 --- a/client/core/rs/src/api/read/provider.rs +++ b/client/core/rs/src/api/read/provider.rs @@ -7,7 +7,7 @@ use crate::entities::provider::{ DockerRegistryAccount, GitProviderAccount, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; /// Get a specific git provider account. /// Response: [GetGitProviderAccountResponse]. 
@@ -15,7 +15,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetGitProviderAccountResponse)] pub struct GetGitProviderAccount { pub id: String, @@ -32,7 +32,7 @@ pub type GetGitProviderAccountResponse = GitProviderAccount; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListGitProviderAccountsResponse)] pub struct ListGitProviderAccounts { /// Optionally filter by accounts with a specific domain. @@ -52,7 +52,7 @@ pub type ListGitProviderAccountsResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetDockerRegistryAccountResponse)] pub struct GetDockerRegistryAccount { pub id: String, @@ -69,7 +69,7 @@ pub type GetDockerRegistryAccountResponse = DockerRegistryAccount; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListDockerRegistryAccountsResponse)] pub struct ListDockerRegistryAccounts { /// Optionally filter by accounts with a specific domain. diff --git a/client/core/rs/src/api/read/repo.rs b/client/core/rs/src/api/read/repo.rs index 7363b353f..4f8e7b5f3 100644 --- a/client/core/rs/src/api/read/repo.rs +++ b/client/core/rs/src/api/read/repo.rs @@ -7,7 +7,7 @@ use crate::entities::repo::{ Repo, RepoActionState, RepoListItem, RepoQuery, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; // @@ -16,7 +16,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(Repo)] pub struct GetRepo { /// Id or name @@ -34,7 +34,7 @@ pub type GetRepoResponse = Repo; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListReposResponse)] pub struct ListRepos { /// optional structured query to filter repos. @@ -52,7 +52,7 @@ pub type ListReposResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListFullReposResponse)] pub struct ListFullRepos { /// optional structured query to filter repos. 
@@ -70,7 +70,7 @@ pub type ListFullReposResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetRepoActionStateResponse)] pub struct GetRepoActionState { /// Id or name @@ -89,7 +89,7 @@ pub type GetRepoActionStateResponse = RepoActionState; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetReposSummaryResponse)] pub struct GetReposSummary {} @@ -120,7 +120,7 @@ pub struct GetReposSummaryResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetRepoWebhooksEnabledResponse)] pub struct GetRepoWebhooksEnabled { /// Id or name diff --git a/client/core/rs/src/api/read/search.rs b/client/core/rs/src/api/read/search.rs index 59e6973b0..67f1e2f77 100644 --- a/client/core/rs/src/api/read/search.rs +++ b/client/core/rs/src/api/read/search.rs @@ -6,11 +6,10 @@ use typeshare::typeshare; use crate::entities::{ build::BuildListItem, deployment::DeploymentListItem, procedure::ProcedureListItem, repo::RepoListItem, - server::ServerListItem, update::ResourceTargetVariant, - MongoDocument, + server::ServerListItem, MongoDocument, ResourceTargetVariant, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; // @@ -19,7 +18,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(FindResourcesResponse)] pub struct FindResources { /// The mongo query as JSON diff --git a/client/core/rs/src/api/read/server.rs b/client/core/rs/src/api/read/server.rs index a177f021c..28fba5d7a 100644 --- a/client/core/rs/src/api/read/server.rs +++ b/client/core/rs/src/api/read/server.rs @@ -4,22 +4,25 @@ use serde::{Deserialize, Serialize}; use typeshare::typeshare; use crate::entities::{ - deployment::ContainerSummary, + docker::{ + container::{Container, ContainerListItem}, + image::{Image, ImageHistoryResponseItem, ImageListItem}, + network::{Network, NetworkListItem}, + volume::{Volume, VolumeListItem}, + }, server::{ - docker_image::ImageSummary, - docker_network::DockerNetwork, - stats::{ - SystemInformation, SystemProcess, SystemStats, - SystemStatsRecord, - }, Server, ServerActionState, ServerListItem, ServerQuery, ServerState, }, stack::ComposeProject, - Timelength, I64, + stats::{ + SystemInformation, SystemProcess, SystemStats, SystemStatsRecord, + }, + update::Log, + ResourceTarget, SearchCombinator, Timelength, I64, U64, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; // @@ -28,7 +31,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(Server)] pub struct GetServer { /// Id or name @@ -46,7 +49,7 @@ pub type GetServerResponse = Server; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListServersResponse)] pub struct ListServers { /// optional structured query to filter servers. 
@@ -64,7 +67,7 @@ pub type ListServersResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListFullServersResponse)] pub struct ListFullServers { /// optional structured query to filter servers. @@ -82,7 +85,7 @@ pub type ListFullServersResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetServerStateResponse)] pub struct GetServerState { /// Id or name @@ -105,7 +108,7 @@ pub struct GetServerStateResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ServerActionState)] pub struct GetServerActionState { /// Id or name @@ -118,13 +121,13 @@ pub type GetServerActionStateResponse = ServerActionState; // -/// Get the version of the monitor periphery agent on the target server. +/// Get the version of the Komodo Periphery agent on the target server. /// Response: [GetPeripheryVersionResponse]. #[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetPeripheryVersionResponse)] pub struct GetPeripheryVersion { /// Id or name @@ -147,7 +150,7 @@ pub struct GetPeripheryVersionResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListDockerNetworksResponse)] pub struct ListDockerNetworks { /// Id or name @@ -156,7 +159,27 @@ pub struct ListDockerNetworks { } #[typeshare] -pub type ListDockerNetworksResponse = Vec; +pub type ListDockerNetworksResponse = Vec; + +// + +/// Inspect a docker network on the server. Response: [InspectDockerNetworkResponse]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(KomodoReadRequest)] +#[response(InspectDockerNetworkResponse)] +pub struct InspectDockerNetwork { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub server: String, + /// The network name + pub network: String, +} + +#[typeshare] +pub type InspectDockerNetworkResponse = Network; // @@ -166,7 +189,7 @@ pub type ListDockerNetworksResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListDockerImagesResponse)] pub struct ListDockerImages { /// Id or name @@ -175,7 +198,48 @@ pub struct ListDockerImages { } #[typeshare] -pub type ListDockerImagesResponse = Vec; +pub type ListDockerImagesResponse = Vec; + +// + +/// Inspect a docker image on the server. Response: [Image]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(KomodoReadRequest)] +#[response(InspectDockerImageResponse)] +pub struct InspectDockerImage { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub server: String, + /// The image name + pub image: String, +} + +#[typeshare] +pub type InspectDockerImageResponse = Image; + +// + +/// Get image history from the server. Response: [ListDockerImageHistoryResponse]. 
+#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(KomodoReadRequest)] +#[response(ListDockerImageHistoryResponse)] +pub struct ListDockerImageHistory { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub server: String, + /// The image name + pub image: String, +} + +#[typeshare] +pub type ListDockerImageHistoryResponse = + Vec; // @@ -185,7 +249,7 @@ pub type ListDockerImagesResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListDockerContainersResponse)] pub struct ListDockerContainers { /// Id or name @@ -194,17 +258,165 @@ pub struct ListDockerContainers { } #[typeshare] -pub type ListDockerContainersResponse = Vec; +pub type ListDockerContainersResponse = Vec; // -/// List all compose projects on the target server. +/// Inspect a docker container on the server. Response: [Container]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(KomodoReadRequest)] +#[response(InspectDockerContainerResponse)] +pub struct InspectDockerContainer { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub server: String, + /// The container name + pub container: String, +} + +#[typeshare] +pub type InspectDockerContainerResponse = Container; + +// + +/// Get the container log's tail, split by stdout/stderr. +/// Response: [Log]. +/// +/// Note. This call will hit the underlying server directly for most up to date log. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(KomodoReadRequest)] +#[response(GetContainerLogResponse)] +pub struct GetContainerLog { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub server: String, + /// The container name + pub container: String, + /// The number of lines of the log tail to include. + /// Default: 100. + /// Max: 5000. + #[serde(default = "default_tail")] + pub tail: U64, +} + +fn default_tail() -> u64 { + 50 +} + +#[typeshare] +pub type GetContainerLogResponse = Log; + +// + +/// Search the container log's tail using `grep`. All lines go to stdout. +/// Response: [Log]. +/// +/// Note. This call will hit the underlying server directly for most up to date log. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(KomodoReadRequest)] +#[response(SearchContainerLogResponse)] +pub struct SearchContainerLog { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub server: String, + /// The container name + pub container: String, + /// The terms to search for. + pub terms: Vec, + /// When searching for multiple terms, can use `AND` or `OR` combinator. + /// + /// - `AND`: Only include lines with **all** terms present in that line. + /// - `OR`: Include lines that have one or more matches in the terms. + #[serde(default)] + pub combinator: SearchCombinator, + /// Invert the results, ie return all lines that DON'T match the terms / combinator. + #[serde(default)] + pub invert: bool, +} + +#[typeshare] +pub type SearchContainerLogResponse = Log; + +// + +/// Inspect a docker container on the server. Response: [Container]. 
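`SearchContainerLog` above delegates the matching to `grep` on the target server; the `combinator` and `invert` fields behave as the doc comments describe. A small illustration of that logic, not the real implementation:

```rust
/// Illustration of the documented SearchContainerLog semantics:
/// `AND` keeps lines containing every term, `OR` keeps lines containing
/// any term, and `invert` flips the result.
fn line_matches(line: &str, terms: &[&str], and: bool, invert: bool) -> bool {
  let hit = if and {
    terms.iter().all(|term| line.contains(term))
  } else {
    terms.iter().any(|term| line.contains(term))
  };
  hit != invert
}

fn main() {
  let line = "ERROR db: connection refused";
  assert!(line_matches(line, &["ERROR", "db"], true, false)); // AND: both present
  assert!(line_matches(line, &["warn", "db"], false, false)); // OR: one present
  assert!(!line_matches(line, &["ERROR"], true, true)); // inverted match
}
```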
+#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(KomodoReadRequest)] +#[response(GetResourceMatchingContainerResponse)] +pub struct GetResourceMatchingContainer { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub server: String, + /// The container name + pub container: String, +} + +#[typeshare] +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct GetResourceMatchingContainerResponse { + pub resource: Option, +} + +// + +/// List all docker volumes on the target server. +/// Response: [ListDockerVolumesResponse]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(KomodoReadRequest)] +#[response(ListDockerVolumesResponse)] +pub struct ListDockerVolumes { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub server: String, +} + +#[typeshare] +pub type ListDockerVolumesResponse = Vec; + +// + +/// Inspect a docker volume on the server. Response: [Volume]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(KomodoReadRequest)] +#[response(InspectDockerVolumeResponse)] +pub struct InspectDockerVolume { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub server: String, + /// The volume name + pub volume: String, +} + +#[typeshare] +pub type InspectDockerVolumeResponse = Volume; + +// + +/// List all docker compose projects on the target server. /// Response: [ListComposeProjectsResponse]. #[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListComposeProjectsResponse)] pub struct ListComposeProjects { /// Id or name @@ -223,7 +435,7 @@ pub type ListComposeProjectsResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetSystemInformationResponse)] pub struct GetSystemInformation { /// Id or name @@ -245,7 +457,7 @@ pub type GetSystemInformationResponse = SystemInformation; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetSystemStatsResponse)] pub struct GetSystemStats { /// Id or name @@ -268,7 +480,7 @@ pub type GetSystemStatsResponse = SystemStats; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListSystemProcessesResponse)] pub struct ListSystemProcesses { /// Id or name @@ -287,7 +499,7 @@ pub type ListSystemProcessesResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetHistoricalServerStatsResponse)] pub struct GetHistoricalServerStats { /// Id or name @@ -319,7 +531,7 @@ pub struct GetHistoricalServerStatsResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetServersSummaryResponse)] pub struct GetServersSummary {} diff --git a/client/core/rs/src/api/read/server_template.rs b/client/core/rs/src/api/read/server_template.rs index 176f5d7fd..2a52d1ff5 100644 --- a/client/core/rs/src/api/read/server_template.rs +++ b/client/core/rs/src/api/read/server_template.rs @@ -7,7 +7,7 @@ use 
crate::entities::server_template::{ ServerTemplate, ServerTemplateListItem, ServerTemplateQuery, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; // @@ -16,7 +16,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetServerTemplateResponse)] pub struct GetServerTemplate { /// Id or name @@ -34,7 +34,7 @@ pub type GetServerTemplateResponse = ServerTemplate; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListServerTemplatesResponse)] pub struct ListServerTemplates { #[serde(default)] @@ -51,7 +51,7 @@ pub type ListServerTemplatesResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListFullServerTemplatesResponse)] pub struct ListFullServerTemplates { #[serde(default)] @@ -69,7 +69,7 @@ pub type ListFullServerTemplatesResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetServerTemplatesSummaryResponse)] pub struct GetServerTemplatesSummary {} diff --git a/client/core/rs/src/api/read/stack.rs b/client/core/rs/src/api/read/stack.rs index 6c0a9204a..95142a741 100644 --- a/client/core/rs/src/api/read/stack.rs +++ b/client/core/rs/src/api/read/stack.rs @@ -11,7 +11,7 @@ use crate::entities::{ SearchCombinator, U64, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; // @@ -20,7 +20,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetStackResponse)] pub struct GetStack { /// Id or name @@ -38,7 +38,7 @@ pub type GetStackResponse = Stack; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListStackServicesResponse)] pub struct ListStackServices { /// Id or name @@ -56,7 +56,7 @@ pub type ListStackServicesResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetStackServiceLogResponse)] pub struct GetStackServiceLog { /// Id or name @@ -88,7 +88,7 @@ pub type GetStackServiceLogResponse = Log; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(SearchStackServiceLogResponse)] pub struct SearchStackServiceLog { /// Id or name @@ -120,7 +120,7 @@ pub type SearchStackServiceLogResponse = Log; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListCommonStackExtraArgsResponse)] pub struct ListCommonStackExtraArgs { /// optional structured query to filter stacks. @@ -138,7 +138,7 @@ pub type ListCommonStackExtraArgsResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListStacksResponse)] pub struct ListStacks { /// optional structured query to filter syncs. 
@@ -156,7 +156,7 @@ pub type ListStacksResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListFullStacksResponse)] pub struct ListFullStacks { /// optional structured query to filter stacks. @@ -174,7 +174,7 @@ pub type ListFullStacksResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetStackActionStateResponse)] pub struct GetStackActionState { /// Id or name @@ -193,7 +193,7 @@ pub type GetStackActionStateResponse = StackActionState; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetStacksSummaryResponse)] pub struct GetStacksSummary {} @@ -232,7 +232,7 @@ pub struct GetStacksSummaryResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetStackWebhooksEnabledResponse)] pub struct GetStackWebhooksEnabled { /// Id or name diff --git a/client/core/rs/src/api/read/sync.rs b/client/core/rs/src/api/read/sync.rs index e6e452ef2..a49f3bd17 100644 --- a/client/core/rs/src/api/read/sync.rs +++ b/client/core/rs/src/api/read/sync.rs @@ -8,7 +8,7 @@ use crate::entities::sync::{ ResourceSyncQuery, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; // @@ -17,7 +17,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ResourceSync)] pub struct GetResourceSync { /// Id or name @@ -35,7 +35,7 @@ pub type GetResourceSyncResponse = ResourceSync; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListResourceSyncsResponse)] pub struct ListResourceSyncs { /// optional structured query to filter syncs. @@ -53,7 +53,7 @@ pub type ListResourceSyncsResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListFullResourceSyncsResponse)] pub struct ListFullResourceSyncs { /// optional structured query to filter syncs. 
@@ -71,7 +71,7 @@ pub type ListFullResourceSyncsResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetResourceSyncActionStateResponse)] pub struct GetResourceSyncActionState { /// Id or name @@ -90,7 +90,7 @@ pub type GetResourceSyncActionStateResponse = ResourceSyncActionState; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetResourceSyncsSummaryResponse)] pub struct GetResourceSyncsSummary {} @@ -119,7 +119,7 @@ pub struct GetResourceSyncsSummaryResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetSyncWebhooksEnabledResponse)] pub struct GetSyncWebhooksEnabled { /// Id or name diff --git a/client/core/rs/src/api/read/tag.rs b/client/core/rs/src/api/read/tag.rs index 7c8ad2293..169b703c2 100644 --- a/client/core/rs/src/api/read/tag.rs +++ b/client/core/rs/src/api/read/tag.rs @@ -5,7 +5,7 @@ use typeshare::typeshare; use crate::entities::{tag::Tag, MongoDocument}; -use super::MonitorReadRequest; +use super::KomodoReadRequest; // @@ -14,7 +14,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetTagResponse)] pub struct GetTag { /// Id or name @@ -33,7 +33,7 @@ pub type GetTagResponse = Tag; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListTagsResponse)] pub struct ListTags { pub query: Option, diff --git a/client/core/rs/src/api/read/toml.rs b/client/core/rs/src/api/read/toml.rs index 10fe2a8f3..afcef36b3 100644 --- a/client/core/rs/src/api/read/toml.rs +++ b/client/core/rs/src/api/read/toml.rs @@ -3,9 +3,9 @@ use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; use typeshare::typeshare; -use crate::entities::update::ResourceTarget; +use crate::entities::ResourceTarget; -use super::MonitorReadRequest; +use super::KomodoReadRequest; /// Response containing pretty formatted toml contents. #[typeshare] @@ -23,7 +23,7 @@ pub struct TomlResponse { #[derive( Debug, Clone, Default, Serialize, Deserialize, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ExportAllResourcesToTomlResponse)] pub struct ExportAllResourcesToToml { /// Tag name or id. Empty array will not filter by tag. @@ -42,7 +42,7 @@ pub type ExportAllResourcesToTomlResponse = TomlResponse; #[derive( Debug, Clone, Default, Serialize, Deserialize, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ExportResourcesToTomlResponse)] pub struct ExportResourcesToToml { /// The targets to include in the export. diff --git a/client/core/rs/src/api/read/update.rs b/client/core/rs/src/api/read/update.rs index a8407e841..f153a871f 100644 --- a/client/core/rs/src/api/read/update.rs +++ b/client/core/rs/src/api/read/update.rs @@ -8,7 +8,7 @@ use crate::entities::{ MongoDocument, }; -use super::MonitorReadRequest; +use super::KomodoReadRequest; /// Get all data for the target update. /// Response: [Update]. 
@@ -16,7 +16,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetUpdateResponse)] pub struct GetUpdate { /// The update id. @@ -34,7 +34,7 @@ pub type GetUpdateResponse = Update; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListUpdatesResponse)] pub struct ListUpdates { /// An optional mongo query to filter the updates. diff --git a/client/core/rs/src/api/read/user.rs b/client/core/rs/src/api/read/user.rs index 6a96d5e68..dbf51b957 100644 --- a/client/core/rs/src/api/read/user.rs +++ b/client/core/rs/src/api/read/user.rs @@ -5,7 +5,7 @@ use typeshare::typeshare; use crate::entities::{api_key::ApiKey, user::User}; -use super::MonitorReadRequest; +use super::KomodoReadRequest; /// Gets list of api keys for the calling user. /// Response: [ListApiKeysResponse] @@ -13,7 +13,7 @@ use super::MonitorReadRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListApiKeysResponse)] pub struct ListApiKeys {} @@ -30,7 +30,7 @@ pub type ListApiKeysResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListApiKeysForServiceUserResponse)] pub struct ListApiKeysForServiceUser { /// Id or username @@ -50,7 +50,7 @@ pub type ListApiKeysForServiceUserResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(FindUserResponse)] pub struct FindUser { /// Id or username @@ -64,13 +64,13 @@ pub type FindUserResponse = User; // /// **Admin only.** -/// Gets list of monitor users. +/// Gets list of Komodo users. /// Response: [ListUsersResponse] #[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListUsersResponse)] pub struct ListUsers {} @@ -85,7 +85,7 @@ pub type ListUsersResponse = Vec; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetUsernameResponse)] pub struct GetUsername { /// The id of the user. diff --git a/client/core/rs/src/api/read/user_group.rs b/client/core/rs/src/api/read/user_group.rs index 11d87cbee..76103fa3b 100644 --- a/client/core/rs/src/api/read/user_group.rs +++ b/client/core/rs/src/api/read/user_group.rs @@ -5,7 +5,7 @@ use typeshare::typeshare; use crate::entities::user_group::UserGroup; -use super::MonitorReadRequest; +use super::KomodoReadRequest; /// Get a specific user group by name or id. /// Response: [UserGroup]. 
@@ -13,7 +13,7 @@ use super::MonitorReadRequest; #[derive( Debug, Clone, Serialize, Deserialize, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetUserGroupResponse)] pub struct GetUserGroup { /// Name or Id @@ -33,7 +33,7 @@ pub type GetUserGroupResponse = UserGroup; #[derive( Debug, Clone, Default, Serialize, Deserialize, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListUserGroupsResponse)] pub struct ListUserGroups {} diff --git a/client/core/rs/src/api/read/variable.rs b/client/core/rs/src/api/read/variable.rs index 7ef0c624c..d034f073c 100644 --- a/client/core/rs/src/api/read/variable.rs +++ b/client/core/rs/src/api/read/variable.rs @@ -5,15 +5,18 @@ use typeshare::typeshare; use crate::entities::variable::Variable; -use super::MonitorReadRequest; +use super::KomodoReadRequest; /// List all available global variables. /// Response: [Variable] +/// +/// Note. For non admin users making this call, +/// secret variables will have their values obscured. #[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(GetVariableResponse)] pub struct GetVariable { /// The name of the variable to get. @@ -27,11 +30,14 @@ pub type GetVariableResponse = Variable; /// List all available global variables. /// Response: [ListVariablesResponse] +/// +/// Note. For non admin users making this call, +/// secret variables will have their values obscured. #[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, )] -#[empty_traits(MonitorReadRequest)] +#[empty_traits(KomodoReadRequest)] #[response(ListVariablesResponse)] pub struct ListVariables {} diff --git a/client/core/rs/src/api/user.rs b/client/core/rs/src/api/user.rs index 18d072882..3c18e5de5 100644 --- a/client/core/rs/src/api/user.rs +++ b/client/core/rs/src/api/user.rs @@ -3,9 +3,9 @@ use resolver_api::{derive::Request, HasResponse}; use serde::{Deserialize, Serialize}; use typeshare::typeshare; -use crate::entities::{update::ResourceTarget, NoData, I64}; +use crate::entities::{NoData, ResourceTarget, I64}; -pub trait MonitorUserRequest: HasResponse {} +pub trait KomodoUserRequest: HasResponse {} // @@ -15,7 +15,7 @@ pub trait MonitorUserRequest: HasResponse {} #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorUserRequest)] +#[empty_traits(KomodoUserRequest)] #[response(PushRecentlyViewedResponse)] pub struct PushRecentlyViewed { /// The target to push. @@ -34,7 +34,7 @@ pub type PushRecentlyViewedResponse = NoData; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorUserRequest)] +#[empty_traits(KomodoUserRequest)] #[response(SetLastSeenUpdateResponse)] pub struct SetLastSeenUpdate {} @@ -52,7 +52,7 @@ pub type SetLastSeenUpdateResponse = NoData; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorUserRequest)] +#[empty_traits(KomodoUserRequest)] #[response(CreateApiKeyResponse)] pub struct CreateApiKey { /// The name for the api key. @@ -86,7 +86,7 @@ pub struct CreateApiKeyResponse { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorUserRequest)] +#[empty_traits(KomodoUserRequest)] #[response(DeleteApiKeyResponse)] pub struct DeleteApiKey { /// The key which the user intends to delete. 
diff --git a/client/core/rs/src/api/write/alerter.rs b/client/core/rs/src/api/write/alerter.rs index dbc2cd2f4..3558dc63d 100644 --- a/client/core/rs/src/api/write/alerter.rs +++ b/client/core/rs/src/api/write/alerter.rs @@ -5,7 +5,7 @@ use typeshare::typeshare; use crate::entities::alerter::{Alerter, _PartialAlerterConfig}; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; // @@ -14,7 +14,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Alerter)] pub struct CreateAlerter { /// The name given to newly created alerter. @@ -31,7 +31,7 @@ pub struct CreateAlerter { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Alerter)] pub struct CopyAlerter { /// The name of the new alerter. @@ -48,7 +48,7 @@ pub struct CopyAlerter { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Alerter)] pub struct DeleteAlerter { /// The id or name of the alerter to delete. @@ -67,7 +67,7 @@ pub struct DeleteAlerter { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Alerter)] pub struct UpdateAlerter { /// The id of the alerter to update. diff --git a/client/core/rs/src/api/write/api_key.rs b/client/core/rs/src/api/write/api_key.rs index e14801042..95a016d20 100644 --- a/client/core/rs/src/api/write/api_key.rs +++ b/client/core/rs/src/api/write/api_key.rs @@ -8,7 +8,7 @@ use crate::{ entities::{NoData, I64}, }; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; // @@ -18,7 +18,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(CreateApiKeyForServiceUserResponse)] pub struct CreateApiKeyForServiceUser { /// Must be service user @@ -42,7 +42,7 @@ pub type CreateApiKeyForServiceUserResponse = CreateApiKeyResponse; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(DeleteApiKeyForServiceUserResponse)] pub struct DeleteApiKeyForServiceUser { pub key: String, diff --git a/client/core/rs/src/api/write/build.rs b/client/core/rs/src/api/write/build.rs index 7cbf4e6e7..91804d261 100644 --- a/client/core/rs/src/api/write/build.rs +++ b/client/core/rs/src/api/write/build.rs @@ -8,7 +8,7 @@ use crate::entities::{ NoData, }; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; // @@ -17,7 +17,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Build)] pub struct CreateBuild { /// The name given to newly created build. @@ -34,7 +34,7 @@ pub struct CreateBuild { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Build)] pub struct CopyBuild { /// The name of the new build. 
@@ -51,7 +51,7 @@ pub struct CopyBuild { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Build)] pub struct DeleteBuild { /// The id or name of the build to delete. @@ -72,7 +72,7 @@ pub struct DeleteBuild { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Build)] pub struct UpdateBuild { /// The id of the build to update. @@ -88,7 +88,7 @@ pub struct UpdateBuild { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(NoData)] pub struct RefreshBuildCache { /// Id or name @@ -103,7 +103,7 @@ pub struct RefreshBuildCache { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(CreateBuildWebhookResponse)] pub struct CreateBuildWebhook { /// Id or name @@ -122,7 +122,7 @@ pub type CreateBuildWebhookResponse = NoData; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(DeleteBuildWebhookResponse)] pub struct DeleteBuildWebhook { /// Id or name diff --git a/client/core/rs/src/api/write/builder.rs b/client/core/rs/src/api/write/builder.rs index ce6da9b23..1e38f7234 100644 --- a/client/core/rs/src/api/write/builder.rs +++ b/client/core/rs/src/api/write/builder.rs @@ -5,7 +5,7 @@ use typeshare::typeshare; use crate::entities::builder::{Builder, PartialBuilderConfig}; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; // @@ -14,7 +14,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Builder)] pub struct CreateBuilder { /// The name given to newly created builder. @@ -31,7 +31,7 @@ pub struct CreateBuilder { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Builder)] pub struct CopyBuilder { /// The name of the new builder. @@ -48,7 +48,7 @@ pub struct CopyBuilder { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Builder)] pub struct DeleteBuilder { /// The id or name of the builder to delete. @@ -69,7 +69,7 @@ pub struct DeleteBuilder { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Builder)] pub struct UpdateBuilder { /// The id of the builder to update. 
diff --git a/client/core/rs/src/api/write/deployment.rs b/client/core/rs/src/api/write/deployment.rs index de818e181..9eb1abd11 100644 --- a/client/core/rs/src/api/write/deployment.rs +++ b/client/core/rs/src/api/write/deployment.rs @@ -8,7 +8,7 @@ use crate::entities::{ update::Update, }; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; // @@ -17,7 +17,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Deployment)] pub struct CreateDeployment { /// The name given to newly created deployment. @@ -34,7 +34,7 @@ pub struct CreateDeployment { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Deployment)] pub struct CopyDeployment { /// The name of the new deployment. @@ -54,7 +54,7 @@ pub struct CopyDeployment { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Deployment)] pub struct DeleteDeployment { /// The id or name of the deployment to delete. @@ -78,7 +78,7 @@ pub struct DeleteDeployment { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Deployment)] pub struct UpdateDeployment { /// The deployment id to update. @@ -97,7 +97,7 @@ pub struct UpdateDeployment { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Update)] pub struct RenameDeployment { /// The id of the deployment to rename. diff --git a/client/core/rs/src/api/write/description.rs b/client/core/rs/src/api/write/description.rs index 8633d0e20..eb99542bd 100644 --- a/client/core/rs/src/api/write/description.rs +++ b/client/core/rs/src/api/write/description.rs @@ -3,9 +3,9 @@ use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; use typeshare::typeshare; -use crate::entities::{update::ResourceTarget, NoData}; +use crate::entities::{NoData, ResourceTarget}; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; /// Update a resources description. /// Response: [NoData]. @@ -13,7 +13,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UpdateDescriptionResponse)] pub struct UpdateDescription { /// The target resource to set description for. 
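One incidental detail in these hunks that downstream client code may need to track: `ResourceTarget` appears to move from `entities::update` to the `entities` root. A hedged before/after sketch, with the crate name assumed:

```rust
// Before this patch the import path was:
// use komodo_client::entities::update::ResourceTarget;

// After this patch:
use komodo_client::entities::ResourceTarget;

fn main() {
    // Only the import path changed; the type itself is untouched here.
    println!("{}", std::any::type_name::<ResourceTarget>());
}
```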
diff --git a/client/core/rs/src/api/write/mod.rs b/client/core/rs/src/api/write/mod.rs index a703424ed..a093f0925 100644 --- a/client/core/rs/src/api/write/mod.rs +++ b/client/core/rs/src/api/write/mod.rs @@ -36,4 +36,4 @@ pub use user::*; pub use user_group::*; pub use variable::*; -pub trait MonitorWriteRequest: resolver_api::HasResponse {} +pub trait KomodoWriteRequest: resolver_api::HasResponse {} diff --git a/client/core/rs/src/api/write/permissions.rs b/client/core/rs/src/api/write/permissions.rs index a187a3884..7453b8468 100644 --- a/client/core/rs/src/api/write/permissions.rs +++ b/client/core/rs/src/api/write/permissions.rs @@ -5,11 +5,10 @@ use typeshare::typeshare; use crate::entities::{ permission::{PermissionLevel, UserTarget}, - update::{ResourceTarget, ResourceTargetVariant}, - NoData, + NoData, ResourceTarget, ResourceTargetVariant, }; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; /// **Admin only.** Update a user or user groups permission on a resource. /// Response: [NoData]. @@ -17,7 +16,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UpdatePermissionOnTargetResponse)] pub struct UpdatePermissionOnTarget { /// Specify the user or user group. @@ -39,7 +38,7 @@ pub type UpdatePermissionOnTargetResponse = NoData; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UpdatePermissionOnResourceTypeResponse)] pub struct UpdatePermissionOnResourceType { /// Specify the user or user group. @@ -61,7 +60,7 @@ pub type UpdatePermissionOnResourceTypeResponse = NoData; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UpdateUserBasePermissionsResponse)] pub struct UpdateUserBasePermissions { /// The target user. diff --git a/client/core/rs/src/api/write/procedure.rs b/client/core/rs/src/api/write/procedure.rs index b860e844e..125624b7f 100644 --- a/client/core/rs/src/api/write/procedure.rs +++ b/client/core/rs/src/api/write/procedure.rs @@ -7,7 +7,7 @@ use crate::entities::procedure::{ Procedure, _PartialProcedureConfig, }; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; // @@ -16,7 +16,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(CreateProcedureResponse)] pub struct CreateProcedure { /// The name given to newly created build. @@ -36,7 +36,7 @@ pub type CreateProcedureResponse = Procedure; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(CopyProcedureResponse)] pub struct CopyProcedure { /// The name of the new procedure. @@ -56,7 +56,7 @@ pub type CopyProcedureResponse = Procedure; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(DeleteProcedureResponse)] pub struct DeleteProcedure { /// The id or name of the procedure to delete. 
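Since `KomodoWriteRequest` is a plain marker trait over `resolver_api::HasResponse`, helper code can stay generic over every write request after the rename. A sketch under the assumption that the trait is importable from the client crate's `api::write` module:

```rust
use komodo_client::api::write::KomodoWriteRequest;
use serde::Serialize;

/// Serialize any write request for logging before it is resolved.
fn log_write_request<R: KomodoWriteRequest + Serialize>(req: &R) {
    match serde_json::to_string(req) {
        Ok(body) => println!("write request: {body}"),
        Err(e) => eprintln!("failed to serialize write request: {e}"),
    }
}
```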
@@ -80,7 +80,7 @@ pub type DeleteProcedureResponse = Procedure; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UpdateProcedureResponse)] pub struct UpdateProcedure { /// The id of the procedure to update. diff --git a/client/core/rs/src/api/write/provider.rs b/client/core/rs/src/api/write/provider.rs index 56ead83d2..fc5031800 100644 --- a/client/core/rs/src/api/write/provider.rs +++ b/client/core/rs/src/api/write/provider.rs @@ -5,7 +5,7 @@ use typeshare::typeshare; use crate::entities::provider::*; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; /// **Admin only.** Create a git provider account. /// Response: [GitProviderAccount]. @@ -13,7 +13,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(CreateGitProviderAccountResponse)] pub struct CreateGitProviderAccount { /// The initial account config. Anything in the _id field will be ignored, @@ -32,7 +32,7 @@ pub type CreateGitProviderAccountResponse = GitProviderAccount; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UpdateGitProviderAccountResponse)] pub struct UpdateGitProviderAccount { /// The id of the git provider account to update. @@ -52,7 +52,7 @@ pub type UpdateGitProviderAccountResponse = GitProviderAccount; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(DeleteGitProviderAccountResponse)] pub struct DeleteGitProviderAccount { /// The id of the git provider to delete @@ -70,7 +70,7 @@ pub type DeleteGitProviderAccountResponse = GitProviderAccount; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(CreateDockerRegistryAccountResponse)] pub struct CreateDockerRegistryAccount { pub account: _PartialDockerRegistryAccount, @@ -87,7 +87,7 @@ pub type CreateDockerRegistryAccountResponse = DockerRegistryAccount; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UpdateDockerRegistryAccountResponse)] pub struct UpdateDockerRegistryAccount { /// The id of the docker registry to update @@ -107,7 +107,7 @@ pub type UpdateDockerRegistryAccountResponse = DockerRegistryAccount; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(DeleteDockerRegistryAccountResponse)] pub struct DeleteDockerRegistryAccount { /// The id of the docker registry account to delete diff --git a/client/core/rs/src/api/write/repo.rs b/client/core/rs/src/api/write/repo.rs index 903a5035d..9d723c65f 100644 --- a/client/core/rs/src/api/write/repo.rs +++ b/client/core/rs/src/api/write/repo.rs @@ -8,7 +8,7 @@ use crate::entities::{ NoData, }; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; // @@ -17,7 +17,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Repo)] pub struct CreateRepo { /// The name given to newly 
created repo. @@ -34,7 +34,7 @@ pub struct CreateRepo { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Repo)] pub struct CopyRepo { /// The name of the new repo. @@ -51,7 +51,7 @@ pub struct CopyRepo { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Repo)] pub struct DeleteRepo { /// The id or name of the repo to delete. @@ -75,7 +75,7 @@ pub struct DeleteRepo { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Repo)] pub struct UpdateRepo { /// The id of the repo to update. @@ -91,7 +91,7 @@ pub struct UpdateRepo { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(NoData)] pub struct RefreshRepoCache { /// Id or name @@ -108,13 +108,13 @@ pub enum RepoWebhookAction { Build, } -/// Create a webhook on the github repo attached to the (monitor) repo +/// Create a webhook on the github repo attached to the (Komodo) Repo resource. /// passed in request. Response: [CreateRepoWebhookResponse] #[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(CreateRepoWebhookResponse)] pub struct CreateRepoWebhook { /// Id or name @@ -129,13 +129,13 @@ pub type CreateRepoWebhookResponse = NoData; // -/// Delete the webhook on the github repo attached to the (monitor) repo +/// Delete the webhook on the github repo attached to the (Komodo) Repo resource. /// passed in request. Response: [DeleteRepoWebhookResponse] #[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(DeleteRepoWebhookResponse)] pub struct DeleteRepoWebhook { /// Id or name diff --git a/client/core/rs/src/api/write/server.rs b/client/core/rs/src/api/write/server.rs index 2c5794f01..4f3a33bee 100644 --- a/client/core/rs/src/api/write/server.rs +++ b/client/core/rs/src/api/write/server.rs @@ -8,7 +8,7 @@ use crate::entities::{ update::Update, }; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; // @@ -17,7 +17,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Server)] pub struct CreateServer { /// The name given to newly created server. @@ -34,7 +34,7 @@ pub struct CreateServer { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Server)] pub struct DeleteServer { /// The id or name of the server to delete. @@ -55,7 +55,7 @@ pub struct DeleteServer { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Server)] pub struct UpdateServer { /// The id of the server to update. 
@@ -71,7 +71,7 @@ pub struct UpdateServer { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Update)] pub struct RenameServer { /// The id of the server to rename. @@ -88,7 +88,7 @@ pub struct RenameServer { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Update)] pub struct CreateNetwork { /// Id or name @@ -96,20 +96,3 @@ pub struct CreateNetwork { /// The name of the network to create. pub name: String, } - -// - -/// Delete a docker network. -/// Response: [Update] -#[typeshare] -#[derive( - Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, -)] -#[empty_traits(MonitorWriteRequest)] -#[response(Update)] -pub struct DeleteNetwork { - /// Id or name. - pub server: String, - /// The name of the network to delete. - pub name: String, -} diff --git a/client/core/rs/src/api/write/server_template.rs b/client/core/rs/src/api/write/server_template.rs index b287f45ac..6e2feae8d 100644 --- a/client/core/rs/src/api/write/server_template.rs +++ b/client/core/rs/src/api/write/server_template.rs @@ -7,7 +7,7 @@ use crate::entities::server_template::{ PartialServerTemplateConfig, ServerTemplate, }; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; // @@ -16,7 +16,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(ServerTemplate)] pub struct CreateServerTemplate { /// The name given to newly created server template. @@ -33,7 +33,7 @@ pub struct CreateServerTemplate { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(ServerTemplate)] pub struct CopyServerTemplate { /// The name of the new server template. @@ -50,7 +50,7 @@ pub struct CopyServerTemplate { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(ServerTemplate)] pub struct DeleteServerTemplate { /// The id or name of the server template to delete. @@ -71,7 +71,7 @@ pub struct DeleteServerTemplate { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(ServerTemplate)] pub struct UpdateServerTemplate { /// The id of the server template to update. diff --git a/client/core/rs/src/api/write/stack.rs b/client/core/rs/src/api/write/stack.rs index 01834ad6d..235974adc 100644 --- a/client/core/rs/src/api/write/stack.rs +++ b/client/core/rs/src/api/write/stack.rs @@ -9,7 +9,7 @@ use crate::entities::{ NoData, }; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; // @@ -18,7 +18,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Stack)] pub struct CreateStack { /// The name given to newly created stack. @@ -35,7 +35,7 @@ pub struct CreateStack { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Stack)] pub struct CopyStack { /// The name of the new stack. 
@@ -52,7 +52,7 @@ pub struct CopyStack { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Stack)] pub struct DeleteStack { /// The id or name of the stack to delete. @@ -76,7 +76,7 @@ pub struct DeleteStack { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Stack)] pub struct UpdateStack { /// The id of the Stack to update. @@ -92,7 +92,7 @@ pub struct UpdateStack { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Update)] pub struct RenameStack { /// The id of the stack to rename. @@ -111,7 +111,7 @@ pub struct RenameStack { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(NoData)] pub struct RefreshStackCache { /// Id or name @@ -133,7 +133,7 @@ pub enum StackWebhookAction { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(CreateStackWebhookResponse)] pub struct CreateStackWebhook { /// Id or name @@ -154,7 +154,7 @@ pub type CreateStackWebhookResponse = NoData; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(DeleteStackWebhookResponse)] pub struct DeleteStackWebhook { /// Id or name diff --git a/client/core/rs/src/api/write/sync.rs b/client/core/rs/src/api/write/sync.rs index 4888de188..294629be4 100644 --- a/client/core/rs/src/api/write/sync.rs +++ b/client/core/rs/src/api/write/sync.rs @@ -8,7 +8,7 @@ use crate::entities::{ NoData, }; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; // @@ -17,7 +17,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(ResourceSync)] pub struct CreateResourceSync { /// The name given to newly created sync. @@ -34,7 +34,7 @@ pub struct CreateResourceSync { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(ResourceSync)] pub struct CopyResourceSync { /// The name of the new sync. @@ -51,7 +51,7 @@ pub struct CopyResourceSync { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(ResourceSync)] pub struct DeleteResourceSync { /// The id or name of the sync to delete. @@ -72,7 +72,7 @@ pub struct DeleteResourceSync { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(ResourceSync)] pub struct UpdateResourceSync { /// The id of the sync to update. 
@@ -88,7 +88,7 @@ pub struct UpdateResourceSync { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(ResourceSync)] pub struct RefreshResourceSyncPending { /// Id or name @@ -110,7 +110,7 @@ pub enum SyncWebhookAction { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(CreateSyncWebhookResponse)] pub struct CreateSyncWebhook { /// Id or name @@ -131,7 +131,7 @@ pub type CreateSyncWebhookResponse = NoData; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(DeleteSyncWebhookResponse)] pub struct DeleteSyncWebhook { /// Id or name diff --git a/client/core/rs/src/api/write/tags.rs b/client/core/rs/src/api/write/tags.rs index 3f7d0cade..2e5baa4d5 100644 --- a/client/core/rs/src/api/write/tags.rs +++ b/client/core/rs/src/api/write/tags.rs @@ -3,9 +3,9 @@ use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; use typeshare::typeshare; -use crate::entities::{tag::Tag, update::ResourceTarget, NoData}; +use crate::entities::{tag::Tag, NoData, ResourceTarget}; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; // @@ -14,7 +14,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Tag)] pub struct CreateTag { /// The name of the tag. @@ -30,7 +30,7 @@ pub struct CreateTag { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Tag)] pub struct DeleteTag { /// The id of the tag to delete. @@ -44,7 +44,7 @@ pub struct DeleteTag { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(Tag)] pub struct RenameTag { /// The id of the tag to rename. @@ -61,7 +61,7 @@ pub struct RenameTag { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UpdateTagsOnResourceResponse)] pub struct UpdateTagsOnResource { pub target: ResourceTarget, diff --git a/client/core/rs/src/api/write/user.rs b/client/core/rs/src/api/write/user.rs index 3bf4c8db9..2ecec9896 100644 --- a/client/core/rs/src/api/write/user.rs +++ b/client/core/rs/src/api/write/user.rs @@ -5,7 +5,7 @@ use typeshare::typeshare; use crate::entities::user::User; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; // @@ -15,7 +15,7 @@ use super::MonitorWriteRequest; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(CreateServiceUserResponse)] pub struct CreateServiceUser { /// The username for the service user. 
@@ -35,7 +35,7 @@ pub type CreateServiceUserResponse = User; #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UpdateServiceUserDescriptionResponse)] pub struct UpdateServiceUserDescription { /// The service user's username diff --git a/client/core/rs/src/api/write/user_group.rs b/client/core/rs/src/api/write/user_group.rs index 39a28821a..dbb5f722a 100644 --- a/client/core/rs/src/api/write/user_group.rs +++ b/client/core/rs/src/api/write/user_group.rs @@ -5,14 +5,14 @@ use typeshare::typeshare; use crate::entities::user_group::UserGroup; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; /// **Admin only.** Create a user group. Response: [UserGroup] #[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UserGroup)] pub struct CreateUserGroup { /// The name to assign to the new UserGroup @@ -26,7 +26,7 @@ pub struct CreateUserGroup { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UserGroup)] pub struct RenameUserGroup { /// The id of the UserGroup @@ -42,7 +42,7 @@ pub struct RenameUserGroup { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UserGroup)] pub struct DeleteUserGroup { /// The id of the UserGroup @@ -56,7 +56,7 @@ pub struct DeleteUserGroup { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UserGroup)] pub struct AddUserToUserGroup { /// The name or id of UserGroup that user should be added to. @@ -72,7 +72,7 @@ pub struct AddUserToUserGroup { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UserGroup)] pub struct RemoveUserFromUserGroup { /// The name or id of UserGroup that user should be removed from. @@ -89,7 +89,7 @@ pub struct RemoveUserFromUserGroup { #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UserGroup)] pub struct SetUsersInUserGroup { /// Id or name. diff --git a/client/core/rs/src/api/write/variable.rs b/client/core/rs/src/api/write/variable.rs index 479dbc676..2f55f637e 100644 --- a/client/core/rs/src/api/write/variable.rs +++ b/client/core/rs/src/api/write/variable.rs @@ -5,14 +5,14 @@ use typeshare::typeshare; use crate::entities::variable::Variable; -use super::MonitorWriteRequest; +use super::KomodoWriteRequest; /// **Admin only.** Create variable. Response: [Variable]. #[typeshare] #[derive( Debug, Clone, Serialize, Deserialize, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(CreateVariableResponse)] pub struct CreateVariable { /// The name of the variable to create. @@ -23,6 +23,9 @@ pub struct CreateVariable { /// The initial value of the description. default: "". #[serde(default)] pub description: String, + /// Whether to make this a secret variable. + #[serde(default)] + pub is_secret: bool, } #[typeshare] @@ -30,12 +33,12 @@ pub type CreateVariableResponse = Variable; // -/// **Admin only.** Update variable. 
Response: [Variable]. +/// **Admin only.** Update variable value. Response: [Variable]. #[typeshare] #[derive( Debug, Clone, Serialize, Deserialize, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UpdateVariableValueResponse)] pub struct UpdateVariableValue { /// The name of the variable to update. @@ -49,12 +52,12 @@ pub type UpdateVariableValueResponse = Variable; // -/// **Admin only.** Update variable. Response: [Variable]. +/// **Admin only.** Update variable description. Response: [Variable]. #[typeshare] #[derive( Debug, Clone, Serialize, Deserialize, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(UpdateVariableDescriptionResponse)] pub struct UpdateVariableDescription { /// The name of the variable to update. @@ -68,12 +71,31 @@ pub type UpdateVariableDescriptionResponse = Variable; // +/// **Admin only.** Update whether variable is secret. Response: [Variable]. +#[typeshare] +#[derive( + Debug, Clone, Serialize, Deserialize, Request, EmptyTraits, +)] +#[empty_traits(KomodoWriteRequest)] +#[response(UpdateVariableIsSecretResponse)] +pub struct UpdateVariableIsSecret { + /// The name of the variable to update. + pub name: String, + /// Whether variable is secret. + pub is_secret: bool, +} + +#[typeshare] +pub type UpdateVariableIsSecretResponse = Variable; + +// + /// **Admin only.** Delete a variable. Response: [Variable]. #[typeshare] #[derive( Debug, Clone, Serialize, Deserialize, Request, EmptyTraits, )] -#[empty_traits(MonitorWriteRequest)] +#[empty_traits(KomodoWriteRequest)] #[response(DeleteVariableResponse)] pub struct DeleteVariable { pub name: String, diff --git a/client/core/rs/src/busy.rs b/client/core/rs/src/busy.rs index 9443b4fe7..277cf4d96 100644 --- a/client/core/rs/src/busy.rs +++ b/client/core/rs/src/busy.rs @@ -14,6 +14,12 @@ impl Busy for ServerActionState { self.pruning_containers || self.pruning_images || self.pruning_networks + || self.pruning_volumes + || self.starting_containers + || self.restarting_containers + || self.pausing_containers + || self.unpausing_containers + || self.stopping_containers } } @@ -23,8 +29,9 @@ impl Busy for DeploymentActionState { || self.starting || self.restarting || self.pausing + || self.unpausing || self.stopping - || self.removing + || self.destroying || self.renaming } } @@ -35,6 +42,7 @@ impl Busy for StackActionState { || self.starting || self.restarting || self.pausing + || self.unpausing || self.stopping || self.destroying } diff --git a/client/core/rs/src/entities/alert.rs b/client/core/rs/src/entities/alert.rs index aba893d0a..67d548631 100644 --- a/client/core/rs/src/entities/alert.rs +++ b/client/core/rs/src/entities/alert.rs @@ -2,13 +2,14 @@ use std::path::PathBuf; use derive_variants::EnumVariants; use serde::{Deserialize, Serialize}; +use strum::{Display, EnumString}; use typeshare::typeshare; use crate::entities::{MongoId, I64}; use super::{ - _Serror, deployment::DeploymentState, server::stats::SeverityLevel, - stack::StackState, update::ResourceTarget, Version, + _Serror, deployment::DeploymentState, stack::StackState, + ResourceTarget, Version, }; /// Representation of an alert in the system. @@ -206,3 +207,30 @@ impl Default for AlertDataVariant { AlertDataVariant::None } } + +/// Severity level of problem. 
+#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + Copy, + PartialEq, + Eq, + PartialOrd, + Default, + Display, + EnumString, +)] +#[serde(rename_all = "UPPERCASE")] +#[strum(serialize_all = "UPPERCASE")] +pub enum SeverityLevel { + /// No problem. + #[default] + Ok, + /// Problem is imminent. + Warning, + /// Problem fully realized. + Critical, +} diff --git a/client/core/rs/src/entities/alerter.rs b/client/core/rs/src/entities/alerter.rs index f40819bfe..087ca0559 100644 --- a/client/core/rs/src/entities/alerter.rs +++ b/client/core/rs/src/entities/alerter.rs @@ -10,7 +10,7 @@ use typeshare::typeshare; use super::{ alert::AlertDataVariant, resource::{Resource, ResourceListItem, ResourceQuery}, - update::ResourceTarget, + ResourceTarget, }; #[typeshare] diff --git a/client/core/rs/src/entities/build.rs b/client/core/rs/src/entities/build.rs index be559109a..efdf11df8 100644 --- a/client/core/rs/src/entities/build.rs +++ b/client/core/rs/src/entities/build.rs @@ -110,7 +110,7 @@ pub struct BuildConfig { pub image_name: String, /// An extra tag put before the build version, for the image pushed to the repository. - /// Eg. in image tag of `aarch64` would push to mbecker20/monitor_core:aarch64-1.13.2. + /// Eg. in image tag of `aarch64` would push to mbecker20/komodo:1.13.2-aarch64. /// If this is empty, the image tag will just be the build version. /// /// Can be used in conjunction with `image_name` to direct multiple builds @@ -128,7 +128,7 @@ pub struct BuildConfig { /// Whether to use https to clone the repo (versus http). Default: true /// - /// Note. Monitor does not currently support cloning repos via ssh. + /// Note. Komodo does not currently support cloning repos via ssh. #[serde(default = "default_git_https")] #[builder(default = "default_git_https()")] #[partial_default(default_git_https())] diff --git a/client/core/rs/src/entities/builder.rs b/client/core/rs/src/entities/builder.rs index 240a7c015..5e05e4e5c 100644 --- a/client/core/rs/src/entities/builder.rs +++ b/client/core/rs/src/entities/builder.rs @@ -249,7 +249,7 @@ impl MergePartial for BuilderConfig { #[typeshare(serialized_as = "Partial")] pub type _PartialServerBuilderConfig = PartialServerBuilderConfig; -/// Configuration for a monitor server builder. +/// Configuration for a Komodo Server Builder. #[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Default, Builder, Partial, diff --git a/client/core/rs/src/entities/config/core.rs b/client/core/rs/src/entities/config/core.rs index fdbed7d85..cb76159be 100644 --- a/client/core/rs/src/entities/config/core.rs +++ b/client/core/rs/src/entities/config/core.rs @@ -1,9 +1,9 @@ -//! # Configuring the Core API +//! # Configuring the Komodo Core API //! -//! Monitor core is configured by parsing base configuration file ([CoreConfig]), and overriding +//! Komodo Core is configured by parsing base configuration file ([CoreConfig]), and overriding //! any fields given in the file with ones provided on the environment ([Env]). //! -//! The recommended method for running monitor core is via the docker image. This image has a default +//! The recommended method for running Komodo Core is via the docker image. This image has a default //! configuration file provided in the image, meaning any custom configuration can be provided //! on the environment alone. However, if a custom configuration file is prefered, it can be mounted //! into the image at `/config/config.toml`. 
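A minimal, self-contained sketch of the "config file first, environment second" precedence the module docs above describe. The struct and field here are illustrative placeholders, not the real `CoreConfig`; `KOMODO_TITLE` follows the UPPER_SNAKE_CASE convention spelled out just below.

```rust
use std::env;

// Placeholder config with a single field, standing in for CoreConfig.
#[derive(Debug)]
struct ExampleConfig {
    title: String,
}

// Parse the file value first, then let the environment override it.
fn load(title_from_file: &str) -> ExampleConfig {
    let title = env::var("KOMODO_TITLE")
        .unwrap_or_else(|_| title_from_file.to_string());
    ExampleConfig { title }
}

fn main() {
    // With KOMODO_TITLE unset this keeps the file value ("Komodo");
    // exporting KOMODO_TITLE would win instead.
    println!("{:?}", load("Komodo"));
}
```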
@@ -20,125 +20,126 @@ use crate::entities::{ use super::{DockerRegistry, GitProvider}; -/// # Monitor Core Environment Variables +/// # Komodo Core Environment Variables /// /// You can override any fields of the [CoreConfig] by passing the associated /// environment variable. The variables should be passed in the traditional `UPPER_SNAKE_CASE` format, /// although the lower case format can still be parsed. /// -/// *Note.* The monitor core docker image includes the default core configuration found in -/// the `mbecker20/monitor/config_example` folder of the repo. To configure the core api, -/// you can either mount your own custom configuration file to `/config/config.toml` inside the container, +/// *Note.* The Komodo Core docker image includes the default core configuration found at +/// [https://github.com/mbecker20/komodo/blob/main/config_example/core.config.example.toml](https://github.com/mbecker20/komodo/blob/main/config_example/core.config.example.toml). +/// To configure the core api, you can either mount your own custom configuration file to +/// `/config/config.toml` inside the container, /// or simply override whichever fields you need using the environment. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Env { /// Specify a custom config path for the core config toml. /// Default: `/config/config.toml` #[serde(default = "default_config_path")] - pub monitor_config_path: String, + pub komodo_config_path: String, /// Override `title` - pub monitor_title: Option, + pub komodo_title: Option, /// Override `host` - pub monitor_host: Option, + pub komodo_host: Option, /// Override `port` - pub monitor_port: Option, + pub komodo_port: Option, /// Override `passkey` - pub monitor_passkey: Option, + pub komodo_passkey: Option, /// Override `ensure_server` - pub monitor_ensure_server: Option, + pub komodo_ensure_server: Option, /// Override `jwt_secret` - pub monitor_jwt_secret: Option, + pub komodo_jwt_secret: Option, /// Override `jwt_ttl` - pub monitor_jwt_ttl: Option, + pub komodo_jwt_ttl: Option, /// Override `repo_directory` - pub monitor_repo_directory: Option, + pub komodo_repo_directory: Option, /// Override `sync_poll_interval` - pub monitor_sync_poll_interval: Option, + pub komodo_sync_poll_interval: Option, /// Override `stack_poll_interval` - pub monitor_stack_poll_interval: Option, + pub komodo_stack_poll_interval: Option, /// Override `build_poll_interval` - pub monitor_build_poll_interval: Option, + pub komodo_build_poll_interval: Option, /// Override `repo_poll_interval` - pub monitor_repo_poll_interval: Option, + pub komodo_repo_poll_interval: Option, /// Override `monitoring_interval` - pub monitor_monitoring_interval: Option, + pub komodo_monitoring_interval: Option, /// Override `keep_stats_for_days` - pub monitor_keep_stats_for_days: Option, + pub komodo_keep_stats_for_days: Option, /// Override `keep_alerts_for_days` - pub monitor_keep_alerts_for_days: Option, + pub komodo_keep_alerts_for_days: Option, /// Override `webhook_secret` - pub monitor_webhook_secret: Option, + pub komodo_webhook_secret: Option, /// Override `webhook_base_url` - pub monitor_webhook_base_url: Option, + pub komodo_webhook_base_url: Option, /// Override `logging.level` - pub monitor_logging_level: Option, + pub komodo_logging_level: Option, /// Override `logging.stdio` - pub monitor_logging_stdio: Option, + pub komodo_logging_stdio: Option, /// Override `logging.otlp_endpoint` - pub monitor_logging_otlp_endpoint: Option, + pub komodo_logging_otlp_endpoint: Option, /// Override 
`logging.opentelemetry_service_name` - pub monitor_logging_opentelemetry_service_name: Option, + pub komodo_logging_opentelemetry_service_name: Option, /// Override `transparent_mode` - pub monitor_transparent_mode: Option, + pub komodo_transparent_mode: Option, /// Override `ui_write_disabled` - pub monitor_ui_write_disabled: Option, + pub komodo_ui_write_disabled: Option, /// Override `enable_new_users` - pub monitor_enable_new_users: Option, + pub komodo_enable_new_users: Option, /// Override `local_auth` - pub monitor_local_auth: Option, + pub komodo_local_auth: Option, /// Override `google_oauth.enabled` - pub monitor_google_oauth_enabled: Option, + pub komodo_google_oauth_enabled: Option, /// Override `google_oauth.id` - pub monitor_google_oauth_id: Option, + pub komodo_google_oauth_id: Option, /// Override `google_oauth.secret` - pub monitor_google_oauth_secret: Option, + pub komodo_google_oauth_secret: Option, /// Override `github_oauth.enabled` - pub monitor_github_oauth_enabled: Option, + pub komodo_github_oauth_enabled: Option, /// Override `github_oauth.id` - pub monitor_github_oauth_id: Option, + pub komodo_github_oauth_id: Option, /// Override `github_oauth.secret` - pub monitor_github_oauth_secret: Option, + pub komodo_github_oauth_secret: Option, /// Override `github_webhook_app.app_id` - pub monitor_github_webhook_app_app_id: Option, + pub komodo_github_webhook_app_app_id: Option, /// Override `github_webhook_app.installations[i].id`. Accepts comma seperated list. /// - /// Note. Paired by index with values in `monitor_github_webhook_app_installations_namespaces` - pub monitor_github_webhook_app_installations_ids: Option>, + /// Note. Paired by index with values in `komodo_github_webhook_app_installations_namespaces` + pub komodo_github_webhook_app_installations_ids: Option>, /// Override `github_webhook_app.installations[i].namespace`. Accepts comma seperated list. /// - /// Note. Paired by index with values in `monitor_github_webhook_app_installations_ids` - pub monitor_github_webhook_app_installations_namespaces: + /// Note. 
Paired by index with values in `komodo_github_webhook_app_installations_ids` + pub komodo_github_webhook_app_installations_namespaces: Option>, /// Override `github_webhook_app.pk_path` - pub monitor_github_webhook_app_pk_path: Option, + pub komodo_github_webhook_app_pk_path: Option, /// Override `mongo.uri` - pub monitor_mongo_uri: Option, + pub komodo_mongo_uri: Option, /// Override `mongo.address` - pub monitor_mongo_address: Option, + pub komodo_mongo_address: Option, /// Override `mongo.username` - pub monitor_mongo_username: Option, + pub komodo_mongo_username: Option, /// Override `mongo.password` - pub monitor_mongo_password: Option, + pub komodo_mongo_password: Option, /// Override `mongo.app_name` - pub monitor_mongo_app_name: Option, + pub komodo_mongo_app_name: Option, /// Override `mongo.db_name` - pub monitor_mongo_db_name: Option, + pub komodo_mongo_db_name: Option, /// Override `aws.access_key_id` - pub monitor_aws_access_key_id: Option, + pub komodo_aws_access_key_id: Option, /// Override `aws.secret_access_key` - pub monitor_aws_secret_access_key: Option, + pub komodo_aws_secret_access_key: Option, /// Override `hetzner.token` - pub monitor_hetzner_token: Option, + pub komodo_hetzner_token: Option, } fn default_config_path() -> String { @@ -148,27 +149,28 @@ fn default_config_path() -> String { /// # Core Configuration File /// /// The Core API initializes it's configuration by reading the environment, -/// parsing the [CoreConfig] schema from the file path specified by `env.monitor_config_path`, +/// parsing the [CoreConfig] schema from the file path specified by `env.komodo_config_path`, /// and then applying any config field overrides specified in the environment. /// -/// *Note.* The monitor core docker image includes the default core configuration found below. -/// To configure the core api, you can either mount your own custom configuration file -/// to `/config/config.toml` inside the container, or simply override whichever fields -/// you need using the environment. +/// *Note.* The Komodo Core docker image includes the default core configuration found at +/// [https://github.com/mbecker20/komodo/blob/main/config_example/core.config.example.toml](https://github.com/mbecker20/komodo/blob/main/config_example/core.config.example.toml). +/// To configure the core api, you can either mount your own custom configuration file to +/// `/config/config.toml` inside the container, +/// or simply override whichever fields you need using the environment. /// -/// Refer to the [example file](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml) for a full example. +/// Refer to the [example file](https://github.com/mbecker20/komodo/blob/main/config_example/core.config.example.toml) for a full example. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct CoreConfig { // =========== // = General = // =========== - /// The title of this monitor deployment. Will be used in the browser page title. - /// Default: 'Monitor' + /// The title of this Komodo Core deployment. Will be used in the browser page title. + /// Default: 'Komodo' #[serde(default = "default_title")] pub title: String, /// The host to use with oauth redirect url, whatever host - /// the user hits to access monitor. eg `https://monitor.mogh.tech`. + /// the user hits to access Komodo. eg `https://komodo.domain.com`. /// Only used if oauth used without user specifying redirect url themselves. 
#[serde(default)] pub host: String, @@ -188,7 +190,7 @@ pub struct CoreConfig { /// If defined, ensure an enabled server exists at this address. /// Use with All In One compose. - /// Example: `http://monitor-periphery:8120` + /// Example: `http://komodo-periphery:8120` #[serde(default)] pub ensure_server: String, @@ -249,15 +251,15 @@ pub struct CoreConfig { pub webhook_secret: String, /// Override the webhook listener base url, if None will use the address defined as 'host'. - /// Example: `https://webhooks.mogh.tech` + /// Example: `https://webhooks.komo.do` /// - /// This can be used if core sits on an internal network which is + /// This can be used if Komodo Core sits on an internal network which is /// unreachable directly from the open internet. - /// A reverse proxy in a public network can forward webhooks to the internal monitor. + /// A reverse proxy in a public network can forward webhooks to Komodo. pub webhook_base_url: Option, /// Configure a Github Webhook app. - /// Allows users to manage repo webhooks from within the Monitor UI. + /// Allows users to manage repo webhooks from within the Komodo UI. #[serde(default)] pub github_webhook_app: GithubWebhookAppConfig, @@ -367,7 +369,7 @@ pub struct CoreConfig { } fn default_title() -> String { - String::from("Monitor") + String::from("Komodo") } fn default_core_port() -> u16 { @@ -530,21 +532,21 @@ pub struct MongoConfig { pub username: Option, /// Mongo user password pub password: Option, - /// Mongo app name. default: `monitor_core` + /// Mongo app name. default: `komodo_core` #[serde(default = "default_core_mongo_app_name")] pub app_name: String, /// Mongo db name. Which mongo database to create the collections in. - /// Default: `monitor`. + /// Default: `komodo`. #[serde(default = "default_core_mongo_db_name")] pub db_name: String, } fn default_core_mongo_app_name() -> String { - "monitor_core".to_string() + "komodo_core".to_string() } fn default_core_mongo_db_name() -> String { - "monitor".to_string() + "komodo".to_string() } impl Default for MongoConfig { @@ -560,7 +562,7 @@ impl Default for MongoConfig { } } -/// Provide AWS credentials for monitor to use. +/// Provide AWS credentials for Komodo to use. #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct AwsCredentials { /// The aws ACCESS_KEY_ID @@ -569,7 +571,7 @@ pub struct AwsCredentials { pub secret_access_key: String, } -/// Provide Hetzner credentials for monitor to use. +/// Provide Hetzner credentials for Komodo to use. #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct HetznerCredentials { pub token: String, diff --git a/client/core/rs/src/entities/config/periphery.rs b/client/core/rs/src/entities/config/periphery.rs index ee6067c18..b2f2a6079 100644 --- a/client/core/rs/src/entities/config/periphery.rs +++ b/client/core/rs/src/entities/config/periphery.rs @@ -1,4 +1,4 @@ -//! # Configuring the periphery agent +//! # Configuring the Komodo Periphery Agent //! //! The periphery configuration is passed in three ways: //! 1. Command line args ([CliArgs]) @@ -143,7 +143,7 @@ pub struct Env { /// # Periphery Configuration File /// -/// Refer to the [example file](https://github.com/mbecker20/monitor/blob/main/config_example/periphery.config.example.toml) for a full example. +/// Refer to the [example file](https://github.com/mbecker20/komodo/blob/main/config_example/periphery.config.example.toml) for a full example. #[derive(Debug, Clone, Deserialize)] pub struct PeripheryConfig { /// The port periphery will run on. 
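Also for orientation: each renamed `Env` field above maps a dotted config path to a `KOMODO_`-prefixed UPPER_SNAKE_CASE variable. A tiny self-contained sketch of that naming convention; the mapping helper is illustrative and not part of the crate.

```rust
// Illustrative helper: derive the env override key for a dotted config path,
// e.g. `mongo.db_name` -> KOMODO_MONGO_DB_NAME, `logging.level` -> KOMODO_LOGGING_LEVEL.
fn komodo_env_key(config_path: &str) -> String {
    format!("KOMODO_{}", config_path.replace('.', "_").to_uppercase())
}

fn main() {
    assert_eq!(komodo_env_key("mongo.db_name"), "KOMODO_MONGO_DB_NAME");
    assert_eq!(komodo_env_key("logging.level"), "KOMODO_LOGGING_LEVEL");
    println!("naming convention holds for the examples above");
}
```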
@@ -151,13 +151,13 @@ pub struct PeripheryConfig { #[serde(default = "default_periphery_port")] pub port: u16, - /// The system directory where monitor managed repos will be cloned. - /// Default: `/etc/monitor/repos` + /// The system directory where Komodo managed repos will be cloned. + /// Default: `/etc/komodo/repos` #[serde(default = "default_repo_dir")] pub repo_dir: PathBuf, /// The system directory where stacks will managed. - /// Default: `/etc/monitor/stacks` + /// Default: `/etc/komodo/stacks` #[serde(default = "default_stack_dir")] pub stack_dir: PathBuf, @@ -219,11 +219,11 @@ fn default_periphery_port() -> u16 { } fn default_repo_dir() -> PathBuf { - "/etc/monitor/repos".parse().unwrap() + "/etc/komodo/repos".parse().unwrap() } fn default_stack_dir() -> PathBuf { - "/etc/monitor/stacks".parse().unwrap() + "/etc/komodo/stacks".parse().unwrap() } fn default_stats_polling_rate() -> Timelength { diff --git a/client/core/rs/src/entities/deployment.rs b/client/core/rs/src/entities/deployment.rs index f063e7c21..a5832dba1 100644 --- a/client/core/rs/src/entities/deployment.rs +++ b/client/core/rs/src/entities/deployment.rs @@ -1,5 +1,3 @@ -use std::collections::HashMap; - use anyhow::Context; use bson::{doc, Document}; use derive_builder::Builder; @@ -14,8 +12,9 @@ use strum::{Display, EnumString}; use typeshare::typeshare; use super::{ + docker::container::ContainerStateStatusEnum, resource::{Resource, ResourceListItem, ResourceQuery}, - EnvironmentVar, Version, + EnvironmentVar, TerminationSignal, Version, }; #[typeshare] @@ -36,7 +35,7 @@ pub struct DeploymentListItemInfo { pub image: String, /// The server that deployment sits on. pub server_id: String, - /// An attached monitor build, if it exists. + /// An attached Komodo Build, if it exists. pub build_id: Option, } @@ -55,7 +54,7 @@ pub struct DeploymentConfig { pub server_id: String, /// The image which the deployment deploys. - /// Can either be a user inputted image, or a Monitor build. + /// Can either be a user inputted image, or a Komodo Build. #[serde(default)] #[builder(default)] pub image: DeploymentImage, @@ -253,9 +252,9 @@ pub enum DeploymentImage { image: String, }, - /// Deploy a monitor build. + /// Deploy a Komodo Build. Build { - /// The id of the build + /// The id of the Build #[serde(default, alias = "build")] build_id: String, /// Use a custom / older version of the image produced by the build. @@ -431,47 +430,6 @@ impl<'de> Visitor<'de> for OptionConversionVisitor { } } -/// A summary of a docker container on a server. -#[typeshare] -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct ContainerSummary { - /// Name of the container. - pub name: String, - /// Id of the container. - pub id: String, - /// The image the container is based on. - pub image: String, - /// The docker labels on the container. - pub labels: HashMap, - /// The state of the container, like `running` or `not_deployed` - pub state: DeploymentState, - /// The status string of the docker container. - pub status: Option, - /// The network mode of the container. 
- pub network_mode: Option, - /// Network names attached to the container - pub networks: Option>, -} - -#[typeshare] -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct DockerContainerStats { - #[serde(alias = "Name")] - pub name: String, - #[serde(alias = "CPUPerc")] - pub cpu_perc: String, - #[serde(alias = "MemPerc")] - pub mem_perc: String, - #[serde(alias = "MemUsage")] - pub mem_usage: String, - #[serde(alias = "NetIO")] - pub net_io: String, - #[serde(alias = "BlockIO")] - pub block_io: String, - #[serde(alias = "PIDs")] - pub pids: String, -} - /// Variants de/serialized from/to snake_case. /// /// Eg. @@ -507,6 +465,23 @@ pub enum DeploymentState { Dead, } +impl From for DeploymentState { + fn from(value: ContainerStateStatusEnum) -> Self { + match value { + ContainerStateStatusEnum::Empty => DeploymentState::Unknown, + ContainerStateStatusEnum::Created => DeploymentState::Created, + ContainerStateStatusEnum::Running => DeploymentState::Running, + ContainerStateStatusEnum::Paused => DeploymentState::Paused, + ContainerStateStatusEnum::Restarting => { + DeploymentState::Restarting + } + ContainerStateStatusEnum::Removing => DeploymentState::Removing, + ContainerStateStatusEnum::Exited => DeploymentState::Exited, + ContainerStateStatusEnum::Dead => DeploymentState::Dead, + } + } +} + #[typeshare] #[derive( Serialize, @@ -537,34 +512,6 @@ pub enum RestartMode { UnlessStopped, } -#[typeshare] -#[derive( - Serialize, - Deserialize, - Debug, - PartialEq, - Hash, - Eq, - Clone, - Copy, - Default, - Display, - EnumString, -)] -#[serde(rename_all = "UPPERCASE")] -#[strum(serialize_all = "UPPERCASE")] -pub enum TerminationSignal { - #[serde(alias = "1")] - SigHup, - #[serde(alias = "2")] - SigInt, - #[serde(alias = "3")] - SigQuit, - #[default] - #[serde(alias = "15")] - SigTerm, -} - #[typeshare] #[derive( Serialize, @@ -749,7 +696,7 @@ pub struct DeploymentActionState { pub pausing: bool, pub unpausing: bool, pub stopping: bool, - pub removing: bool, + pub destroying: bool, pub renaming: bool, } diff --git a/client/core/rs/src/entities/docker/container.rs b/client/core/rs/src/entities/docker/container.rs new file mode 100644 index 000000000..e5551cba2 --- /dev/null +++ b/client/core/rs/src/entities/docker/container.rs @@ -0,0 +1,1161 @@ +use std::collections::HashMap; + +use anyhow::anyhow; +use serde::{Deserialize, Serialize}; +use typeshare::typeshare; + +use crate::entities::{Usize, I64}; + +use super::{ContainerConfig, GraphDriverData, PortBinding}; + +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ContainerListItem { + /// The first name in Names, not including the initial '/' + pub name: String, + /// The ID of this container + pub id: Option, + /// The name of the image used when creating this container + pub image: Option, + /// The ID of the image that this container was created from + pub image_id: Option, + /// When the container was created + pub created: Option, + /// The size of files that have been created or changed by this container + pub size_rw: Option, + /// The total size of all the files in this container + pub size_root_fs: Option, + /// The state of this container (e.g. `exited`) + pub state: ContainerStateStatusEnum, + /// Additional human-readable status of this container (e.g. 
`Exit 0`) + pub status: Option, + /// The network mode + pub network_mode: Option, + /// The network names attached to container + pub networks: Vec, + /// The volume names attached to container + pub volumes: Vec, +} + +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct NameAndId { + pub name: String, + pub id: String, +} + +/// An open port on a container +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct Port { + /// Host IP address that the container's port is mapped to + #[serde(rename = "IP")] + pub ip: Option, + + /// Port on the container + #[serde(default, rename = "PrivatePort")] + pub private_port: u16, + + /// Port exposed on the host + #[serde(rename = "PublicPort")] + pub public_port: Option, + + #[serde(default, rename = "Type")] + pub typ: PortTypeEnum, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + Eq, + PartialOrd, + Ord, + Default, + Serialize, + Deserialize, +)] +pub enum PortTypeEnum { + #[default] + #[serde(rename = "")] + EMPTY, + #[serde(rename = "tcp")] + TCP, + #[serde(rename = "udp")] + UDP, + #[serde(rename = "sctp")] + SCTP, +} + +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct Container { + /// The ID of the container + #[serde(rename = "Id")] + pub id: Option, + + /// The time the container was created + #[serde(rename = "Created")] + pub created: Option, + + /// The path to the command being run + #[serde(rename = "Path")] + pub path: Option, + + /// The arguments to the command being run + #[serde(default, rename = "Args")] + pub args: Vec, + + #[serde(rename = "State")] + pub state: Option, + + /// The container's image ID + #[serde(rename = "Image")] + pub image: Option, + + #[serde(rename = "ResolvConfPath")] + pub resolv_conf_path: Option, + + #[serde(rename = "HostnamePath")] + pub hostname_path: Option, + + #[serde(rename = "HostsPath")] + pub hosts_path: Option, + + #[serde(rename = "LogPath")] + pub log_path: Option, + + #[serde(rename = "Name")] + pub name: Option, + + #[serde(rename = "RestartCount")] + pub restart_count: Option, + + #[serde(rename = "Driver")] + pub driver: Option, + + #[serde(rename = "Platform")] + pub platform: Option, + + #[serde(rename = "MountLabel")] + pub mount_label: Option, + + #[serde(rename = "ProcessLabel")] + pub process_label: Option, + + #[serde(rename = "AppArmorProfile")] + pub app_armor_profile: Option, + + /// IDs of exec instances that are running in the container. + #[serde(default, rename = "ExecIDs")] + pub exec_ids: Vec, + + #[serde(rename = "HostConfig")] + pub host_config: Option, + + #[serde(rename = "GraphDriver")] + pub graph_driver: Option, + + /// The size of files that have been created or changed by this container. + #[serde(rename = "SizeRw")] + pub size_rw: Option, + + /// The total size of all the files in this container. + #[serde(rename = "SizeRootFs")] + pub size_root_fs: Option, + + #[serde(default, rename = "Mounts")] + pub mounts: Vec, + + #[serde(rename = "Config")] + pub config: Option, + + #[serde(rename = "NetworkSettings")] + pub network_settings: Option, +} + +/// ContainerState stores container's running state. It's part of ContainerJSONBase and will be returned by the \"inspect\" command. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ContainerState { + /// String representation of the container state. 
Can be one of \"created\", \"running\", \"paused\", \"restarting\", \"removing\", \"exited\", or \"dead\". + #[serde(default, rename = "Status")] + pub status: ContainerStateStatusEnum, + + /// Whether this container is running. Note that a running container can be _paused_. The `Running` and `Paused` booleans are not mutually exclusive: When pausing a container (on Linux), the freezer cgroup is used to suspend all processes in the container. Freezing the process requires the process to be running. As a result, paused containers are both `Running` _and_ `Paused`. Use the `Status` field instead to determine if a container's state is \"running\". + #[serde(rename = "Running")] + pub running: Option, + + /// Whether this container is paused. + #[serde(rename = "Paused")] + pub paused: Option, + + /// Whether this container is restarting. + #[serde(rename = "Restarting")] + pub restarting: Option, + + /// Whether a process within this container has been killed because it ran out of memory since the container was last started. + #[serde(rename = "OOMKilled")] + pub oom_killed: Option, + + #[serde(rename = "Dead")] + pub dead: Option, + + /// The process ID of this container + #[serde(rename = "Pid")] + pub pid: Option, + + /// The last exit code of this container + #[serde(rename = "ExitCode")] + pub exit_code: Option, + + #[serde(rename = "Error")] + pub error: Option, + + /// The time when this container was last started. + #[serde(rename = "StartedAt")] + pub started_at: Option, + + /// The time when this container last exited. + #[serde(rename = "FinishedAt")] + pub finished_at: Option, + + #[serde(rename = "Health")] + pub health: Option, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Serialize, + Deserialize, + Eq, + Ord, + Default, +)] +pub enum ContainerStateStatusEnum { + #[default] + #[serde(rename = "")] + Empty, + #[serde(rename = "created")] + Created, + #[serde(rename = "running")] + Running, + #[serde(rename = "paused")] + Paused, + #[serde(rename = "restarting")] + Restarting, + #[serde(rename = "removing")] + Removing, + #[serde(rename = "exited")] + Exited, + #[serde(rename = "dead")] + Dead, +} + +impl ::std::str::FromStr for ContainerStateStatusEnum { + type Err = anyhow::Error; + fn from_str(s: &str) -> Result { + match s { + "" => Ok(ContainerStateStatusEnum::Empty), + "created" => Ok(ContainerStateStatusEnum::Created), + "running" => Ok(ContainerStateStatusEnum::Running), + "paused" => Ok(ContainerStateStatusEnum::Paused), + "restarting" => Ok(ContainerStateStatusEnum::Restarting), + "removing" => Ok(ContainerStateStatusEnum::Removing), + "exited" => Ok(ContainerStateStatusEnum::Exited), + "dead" => Ok(ContainerStateStatusEnum::Dead), + x => Err(anyhow!("Invalid container state: {}", x)), + } + } +} + +/// Health stores information about the container's healthcheck results. 
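For illustration only (not part of this patch): the `FromStr` impl above together with the `From<ContainerStateStatusEnum> for DeploymentState` conversion added in deployment.rs compose naturally when turning a raw Docker status string into a deployment state. A minimal sketch, assuming both types are in scope; `deployment_state` is a hypothetical helper, not something this PR adds:

use std::str::FromStr;

// Hypothetical helper (not in the patch): map a raw docker status string
// (e.g. "running") into a DeploymentState, falling back to Unknown when
// the string does not match any known container state.
fn deployment_state(raw_status: &str) -> DeploymentState {
  ContainerStateStatusEnum::from_str(raw_status)
    .map(DeploymentState::from)
    .unwrap_or(DeploymentState::Unknown)
}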
+#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ContainerHealth { + /// Status is one of `none`, `starting`, `healthy` or `unhealthy` - \"none\" Indicates there is no healthcheck - \"starting\" Starting indicates that the container is not yet ready - \"healthy\" Healthy indicates that the container is running correctly - \"unhealthy\" Unhealthy indicates that the container has a problem + #[serde(default, rename = "Status")] + pub status: HealthStatusEnum, + + /// FailingStreak is the number of consecutive failures + #[serde(rename = "FailingStreak")] + pub failing_streak: Option, + + /// Log contains the last few results (oldest first) + #[serde(default, rename = "Log")] + pub log: Vec, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Serialize, + Deserialize, + Eq, + Ord, + Default, +)] +pub enum HealthStatusEnum { + #[default] + #[serde(rename = "")] + Empty, + #[serde(rename = "none")] + None, + #[serde(rename = "starting")] + Starting, + #[serde(rename = "healthy")] + Healthy, + #[serde(rename = "unhealthy")] + Unhealthy, +} + +/// HealthcheckResult stores information about a single run of a healthcheck probe +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct HealthcheckResult { + /// Date and time at which this check started in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + #[serde(rename = "Start")] + pub start: Option, + + /// Date and time at which this check ended in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + #[serde(rename = "End")] + pub end: Option, + + /// ExitCode meanings: - `0` healthy - `1` unhealthy - `2` reserved (considered unhealthy) - other values: error running probe + #[serde(rename = "ExitCode")] + pub exit_code: Option, + + /// Output from last check + #[serde(rename = "Output")] + pub output: Option, +} + +/// Container configuration that depends on the host we are running on +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct HostConfig { + /// An integer value representing this container's relative CPU weight versus other containers. + #[serde(rename = "CpuShares")] + pub cpu_shares: Option, + + /// Memory limit in bytes. + #[serde(rename = "Memory")] + pub memory: Option, + + /// Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. + #[serde(rename = "CgroupParent")] + pub cgroup_parent: Option, + + /// Block IO weight (relative weight). 
+ #[serde(rename = "BlkioWeight")] + pub blkio_weight: Option, + + /// Block IO weight (relative device weight) in the form: ``` [{\"Path\": \"device_path\", \"Weight\": weight}] ``` + #[serde(default, rename = "BlkioWeightDevice")] + pub blkio_weight_device: Vec, + + /// Limit read rate (bytes per second) from a device, in the form: ``` [{\"Path\": \"device_path\", \"Rate\": rate}] ``` + #[serde(default, rename = "BlkioDeviceReadBps")] + pub blkio_device_read_bps: Vec, + + /// Limit write rate (bytes per second) to a device, in the form: ``` [{\"Path\": \"device_path\", \"Rate\": rate}] ``` + #[serde(default, rename = "BlkioDeviceWriteBps")] + pub blkio_device_write_bps: Vec, + + /// Limit read rate (IO per second) from a device, in the form: ``` [{\"Path\": \"device_path\", \"Rate\": rate}] ``` + #[serde(default, rename = "BlkioDeviceReadIOps")] + pub blkio_device_read_iops: Vec, + + /// Limit write rate (IO per second) to a device, in the form: ``` [{\"Path\": \"device_path\", \"Rate\": rate}] ``` + #[serde(default, rename = "BlkioDeviceWriteIOps")] + pub blkio_device_write_iops: Vec, + + /// The length of a CPU period in microseconds. + #[serde(rename = "CpuPeriod")] + pub cpu_period: Option, + + /// Microseconds of CPU time that the container can get in a CPU period. + #[serde(rename = "CpuQuota")] + pub cpu_quota: Option, + + /// The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks. + #[serde(rename = "CpuRealtimePeriod")] + pub cpu_realtime_period: Option, + + /// The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks. + #[serde(rename = "CpuRealtimeRuntime")] + pub cpu_realtime_runtime: Option, + + /// CPUs in which to allow execution (e.g., `0-3`, `0,1`). + #[serde(rename = "CpusetCpus")] + pub cpuset_cpus: Option, + + /// Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + #[serde(rename = "CpusetMems")] + pub cpuset_mems: Option, + + /// A list of devices to add to the container. + #[serde(default, rename = "Devices")] + pub devices: Vec, + + /// a list of cgroup rules to apply to the container + #[serde(default, rename = "DeviceCgroupRules")] + pub device_cgroup_rules: Vec, + + /// A list of requests for devices to be sent to device drivers. + #[serde(default, rename = "DeviceRequests")] + pub device_requests: Vec, + + /// Hard limit for kernel TCP buffer memory (in bytes). Depending on the OCI runtime in use, this option may be ignored. It is no longer supported by the default (runc) runtime. This field is omitted when empty. + #[serde(rename = "KernelMemoryTCP")] + pub kernel_memory_tcp: Option, + + /// Memory soft limit in bytes. + #[serde(rename = "MemoryReservation")] + pub memory_reservation: Option, + + /// Total memory limit (memory + swap). Set as `-1` to enable unlimited swap. + #[serde(rename = "MemorySwap")] + pub memory_swap: Option, + + /// Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. + #[serde(rename = "MemorySwappiness")] + pub memory_swappiness: Option, + + /// CPU quota in units of 10-9 CPUs. + #[serde(rename = "NanoCpus")] + pub nano_cpus: Option, + + /// Disable OOM Killer for the container. + #[serde(rename = "OomKillDisable")] + pub oom_kill_disable: Option, + + /// Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. 
+ #[serde(rename = "Init")] + pub init: Option, + + /// Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. + #[serde(rename = "PidsLimit")] + pub pids_limit: Option, + + /// A list of resource limits to set in the container. For example: ``` {\"Name\": \"nofile\", \"Soft\": 1024, \"Hard\": 2048} ``` + #[serde(default, rename = "Ulimits")] + pub ulimits: Vec, + + /// The number of usable CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + #[serde(rename = "CpuCount")] + pub cpu_count: Option, + + /// The usable percentage of the available CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + #[serde(rename = "CpuPercent")] + pub cpu_percent: Option, + + /// Maximum IOps for the container system drive (Windows only) + #[serde(rename = "IOMaximumIOps")] + pub io_maximum_iops: Option, + + /// Maximum IO in bytes per second for the container system drive (Windows only). + #[serde(rename = "IOMaximumBandwidth")] + pub io_maximum_bandwidth: Option, + + /// A list of volume bindings for this container. Each volume binding is a string in one of these forms: - `host-src:container-dest[:options]` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - `volume-name:container-dest[:options]` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. `options` is an optional, comma-delimited list of: - `nocopy` disables automatic copying of data from the container path to the volume. The `nocopy` flag only applies to named volumes. - `[ro|rw]` mounts a volume read-only or read-write, respectively. If omitted or set to `rw`, volumes are mounted read-write. - `[z|Z]` applies SELinux labels to allow or deny multiple containers to read and write to the same volume. - `z`: a _shared_ content label is applied to the content. This label indicates that multiple containers can share the volume content, for both reading and writing. - `Z`: a _private unshared_ label is applied to the content. This label indicates that only the current container can use a private volume. Labeling systems such as SELinux require proper labels to be placed on volume content that is mounted into a container. Without a label, the security system can prevent a container's processes from using the content. By default, the labels set by the host operating system are not modified. - `[[r]shared|[r]slave|[r]private]` specifies mount [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). This only applies to bind-mounted volumes, not internal volumes or named volumes. Mount propagation requires the source mount point (the location where the source directory is mounted in the host operating system) to have the correct propagation properties. For shared volumes, the source mount point must be set to `shared`. For slave volumes, the mount must be set to either `shared` or `slave`. + #[serde(default, rename = "Binds")] + pub binds: Vec, + + /// Path to a file where the container ID is written + #[serde(rename = "ContainerIDFile")] + pub container_id_file: Option, + + #[serde(rename = "LogConfig")] + pub log_config: Option, + + /// Network mode to use for this container. 
Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken as a custom network's name to which this container should connect to. + #[serde(rename = "NetworkMode")] + pub network_mode: Option, + + #[serde(default, rename = "PortBindings")] + pub port_bindings: HashMap>, + + #[serde(rename = "RestartPolicy")] + pub restart_policy: Option, + + /// Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set. + #[serde(rename = "AutoRemove")] + pub auto_remove: Option, + + /// Driver that this container uses to mount volumes. + #[serde(rename = "VolumeDriver")] + pub volume_driver: Option, + + /// A list of volumes to inherit from another container, specified in the form `[:]`. + #[serde(default, rename = "VolumesFrom")] + pub volumes_from: Vec, + + /// Specification for mounts to be added to the container. + #[serde(default, rename = "Mounts")] + pub mounts: Vec, + + /// Initial console size, as an `[height, width]` array. + #[serde(default, rename = "ConsoleSize")] + pub console_size: Vec, + + /// Arbitrary non-identifying metadata attached to container and provided to the runtime when the container is started. + #[serde(default, rename = "Annotations")] + pub annotations: HashMap, + + /// A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'. + #[serde(default, rename = "CapAdd")] + pub cap_add: Vec, + + /// A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'. + #[serde(default, rename = "CapDrop")] + pub cap_drop: Vec, + + /// cgroup namespace mode for the container. Possible values are: - `\"private\"`: the container runs in its own private cgroup namespace - `\"host\"`: use the host system's cgroup namespace If not specified, the daemon default is used, which can either be `\"private\"` or `\"host\"`, depending on daemon version, kernel support and configuration. + #[serde(rename = "CgroupnsMode")] + pub cgroupns_mode: Option, + + /// A list of DNS servers for the container to use. + #[serde(default, rename = "Dns")] + pub dns: Vec, + + /// A list of DNS options. + #[serde(default, rename = "DnsOptions")] + pub dns_options: Vec, + + /// A list of DNS search domains. + #[serde(default, rename = "DnsSearch")] + pub dns_search: Vec, + + /// A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `[\"hostname:IP\"]`. + #[serde(default, rename = "ExtraHosts")] + pub extra_hosts: Vec, + + /// A list of additional groups that the container process will run as. + #[serde(default, rename = "GroupAdd")] + pub group_add: Vec, + + /// IPC sharing mode for the container. Possible values are: - `\"none\"`: own private IPC namespace, with /dev/shm not mounted - `\"private\"`: own private IPC namespace - `\"shareable\"`: own private IPC namespace, with a possibility to share it with other containers - `\"container:\"`: join another (shareable) container's IPC namespace - `\"host\"`: use the host system's IPC namespace If not specified, daemon default is used, which can either be `\"private\"` or `\"shareable\"`, depending on daemon version and configuration. + #[serde(rename = "IpcMode")] + pub ipc_mode: Option, + + /// Cgroup to use for the container. + #[serde(rename = "Cgroup")] + pub cgroup: Option, + + /// A list of links for the container in the form `container_name:alias`. 
+ #[serde(default, rename = "Links")] + pub links: Vec, + + /// An integer value containing the score given to the container in order to tune OOM killer preferences. + #[serde(rename = "OomScoreAdj")] + pub oom_score_adj: Option, + + /// Set the PID (Process) Namespace mode for the container. It can be either: - `\"container:\"`: joins another container's PID namespace - `\"host\"`: use the host's PID namespace inside the container + #[serde(rename = "PidMode")] + pub pid_mode: Option, + + /// Gives the container full access to the host. + #[serde(rename = "Privileged")] + pub privileged: Option, + + /// Allocates an ephemeral host port for all of a container's exposed ports. Ports are de-allocated when the container stops and allocated when the container starts. The allocated port might be changed when restarting the container. The port is selected from the ephemeral port range that depends on the kernel. For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. + #[serde(rename = "PublishAllPorts")] + pub publish_all_ports: Option, + + /// Mount the container's root filesystem as read only. + #[serde(rename = "ReadonlyRootfs")] + pub readonly_rootfs: Option, + + /// A list of string values to customize labels for MLS systems, such as SELinux. + #[serde(default, rename = "SecurityOpt")] + pub security_opt: Vec, + + /// Storage driver options for this container, in the form `{\"size\": \"120G\"}`. + #[serde(default, rename = "StorageOpt")] + pub storage_opt: HashMap, + + /// A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: ``` { \"/run\": \"rw,noexec,nosuid,size=65536k\" } ``` + #[serde(default, rename = "Tmpfs")] + pub tmpfs: HashMap, + + /// UTS namespace to use for the container. + #[serde(rename = "UTSMode")] + pub uts_mode: Option, + + /// Sets the usernamespace mode for the container when usernamespace remapping option is enabled. + #[serde(rename = "UsernsMode")] + pub userns_mode: Option, + + /// Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + #[serde(rename = "ShmSize")] + pub shm_size: Option, + + /// A list of kernel parameters (sysctls) to set in the container. For example: ``` {\"net.ipv4.ip_forward\": \"1\"} ``` + #[serde(default, rename = "Sysctls")] + pub sysctls: HashMap, + + /// Runtime to use with this container. + #[serde(rename = "Runtime")] + pub runtime: Option, + + /// Isolation technology of the container. (Windows only) + #[serde(default, rename = "Isolation")] + pub isolation: HostConfigIsolationEnum, + + /// The list of paths to be masked inside the container (this overrides the default set of paths). + #[serde(default, rename = "MaskedPaths")] + pub masked_paths: Vec, + + /// The list of paths to be set as read-only inside the container (this overrides the default set of paths). 
+ #[serde(default, rename = "ReadonlyPaths")] + pub readonly_paths: Vec, +} + +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ResourcesBlkioWeightDevice { + #[serde(rename = "Path")] + pub path: Option, + + #[serde(rename = "Weight")] + pub weight: Option, +} + +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ThrottleDevice { + /// Device path + #[serde(rename = "Path")] + pub path: Option, + + /// Rate + #[serde(rename = "Rate")] + pub rate: Option, +} + +/// A device mapping between the host and container +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct DeviceMapping { + #[serde(rename = "PathOnHost")] + pub path_on_host: Option, + + #[serde(rename = "PathInContainer")] + pub path_in_container: Option, + + #[serde(rename = "CgroupPermissions")] + pub cgroup_permissions: Option, +} + +/// A request for devices to be sent to device drivers +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct DeviceRequest { + #[serde(rename = "Driver")] + pub driver: Option, + + #[serde(rename = "Count")] + pub count: Option, + + #[serde(default, rename = "DeviceIDs")] + pub device_ids: Vec, + + /// A list of capabilities; an OR list of AND lists of capabilities. + #[serde(default, rename = "Capabilities")] + pub capabilities: Vec>, + + /// Driver-specific options, specified as a key/value pairs. These options are passed directly to the driver. + #[serde(default, rename = "Options")] + pub options: HashMap, +} + +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ResourcesUlimits { + /// Name of ulimit + #[serde(rename = "Name")] + pub name: Option, + + /// Soft limit + #[serde(rename = "Soft")] + pub soft: Option, + + /// Hard limit + #[serde(rename = "Hard")] + pub hard: Option, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Serialize, + Deserialize, + Eq, + Ord, + Default, +)] +pub enum HostConfigIsolationEnum { + #[default] + #[serde(rename = "")] + Empty, + #[serde(rename = "default")] + Default, + #[serde(rename = "process")] + Process, + #[serde(rename = "hyperv")] + Hyperv, +} + +/// The logging configuration for this container +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct HostConfigLogConfig { + #[serde(rename = "Type")] + pub typ: Option, + + #[serde(default, rename = "Config")] + pub config: HashMap, +} + +/// The behavior to apply when the container exits. The default is not to restart. An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct RestartPolicy { + /// - Empty string means not to restart - `no` Do not automatically restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container - `on-failure` Restart only when the container exit code is non-zero + #[serde(default, rename = "Name")] + pub name: RestartPolicyNameEnum, + + /// If `on-failure` is used, the number of times to retry before giving up. 
+ #[serde(rename = "MaximumRetryCount")] + pub maximum_retry_count: Option, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Serialize, + Deserialize, + Eq, + Ord, + Default, +)] +pub enum RestartPolicyNameEnum { + #[default] + #[serde(rename = "")] + Empty, + #[serde(rename = "no")] + No, + #[serde(rename = "always")] + Always, + #[serde(rename = "unless-stopped")] + UnlessStopped, + #[serde(rename = "on-failure")] + OnFailure, +} + +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ContainerMount { + /// Container path. + #[serde(rename = "Target")] + pub target: Option, + + /// Mount source (e.g. a volume name, a host path). + #[serde(rename = "Source")] + pub source: Option, + + /// The mount type. Available types: - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. - `cluster` a Swarm cluster volume + #[serde(default, rename = "Type")] + pub typ: MountTypeEnum, + + /// Whether the mount should be read-only. + #[serde(rename = "ReadOnly")] + pub read_only: Option, + + /// The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`. + #[serde(rename = "Consistency")] + pub consistency: Option, + + #[serde(rename = "BindOptions")] + pub bind_options: Option, + + #[serde(rename = "VolumeOptions")] + pub volume_options: Option, + + #[serde(rename = "TmpfsOptions")] + pub tmpfs_options: Option, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Serialize, + Deserialize, + Eq, + Ord, + Default, +)] +pub enum MountTypeEnum { + #[default] + #[serde(rename = "")] + Empty, + #[serde(rename = "bind")] + Bind, + #[serde(rename = "volume")] + Volume, + #[serde(rename = "tmpfs")] + Tmpfs, + #[serde(rename = "npipe")] + Npipe, + #[serde(rename = "cluster")] + Cluster, +} + +/// Optional configuration for the `bind` type. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct MountBindOptions { + /// A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`. + #[serde(default, rename = "Propagation")] + pub propagation: MountBindOptionsPropagationEnum, + + /// Disable recursive bind mount. + #[serde(rename = "NonRecursive")] + pub non_recursive: Option, + + /// Create mount point on host if missing + #[serde(rename = "CreateMountpoint")] + pub create_mountpoint: Option, + + /// Make the mount non-recursively read-only, but still leave the mount recursive (unless NonRecursive is set to `true` in conjunction). Addded in v1.44, before that version all read-only mounts were non-recursive by default. To match the previous behaviour this will default to `true` for clients on versions prior to v1.44. + #[serde(rename = "ReadOnlyNonRecursive")] + pub read_only_non_recursive: Option, + + /// Raise an error if the mount cannot be made recursively read-only. 
+ #[serde(rename = "ReadOnlyForceRecursive")] + pub read_only_force_recursive: Option, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Serialize, + Deserialize, + Eq, + Ord, + Default, +)] +pub enum MountBindOptionsPropagationEnum { + #[default] + #[serde(rename = "")] + Empty, + #[serde(rename = "private")] + Private, + #[serde(rename = "rprivate")] + Rprivate, + #[serde(rename = "shared")] + Shared, + #[serde(rename = "rshared")] + Rshared, + #[serde(rename = "slave")] + Slave, + #[serde(rename = "rslave")] + Rslave, +} + +/// Optional configuration for the `volume` type. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct MountVolumeOptions { + /// Populate volume with data from the target. + #[serde(rename = "NoCopy")] + pub no_copy: Option, + + /// User-defined key/value metadata. + #[serde(default, rename = "Labels")] + pub labels: HashMap, + + #[serde(rename = "DriverConfig")] + pub driver_config: Option, + + /// Source path inside the volume. Must be relative without any back traversals. + #[serde(rename = "Subpath")] + pub subpath: Option, +} + +/// Map of driver specific options +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct MountVolumeOptionsDriverConfig { + /// Name of the driver to use to create the volume. + #[serde(rename = "Name")] + pub name: Option, + + /// key/value map of driver specific options. + #[serde(default, rename = "Options")] + pub options: HashMap, +} + +/// Optional configuration for the `tmpfs` type. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct MountTmpfsOptions { + /// The size for the tmpfs mount in bytes. + #[serde(rename = "SizeBytes")] + pub size_bytes: Option, + + /// The permission mode for the tmpfs mount in an integer. + #[serde(rename = "Mode")] + pub mode: Option, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Serialize, + Deserialize, + Eq, + Ord, + Default, +)] +pub enum HostConfigCgroupnsModeEnum { + #[default] + #[serde(rename = "")] + Empty, + #[serde(rename = "private")] + Private, + #[serde(rename = "host")] + Host, +} + +/// MountPoint represents a mount point configuration inside the container. This is used for reporting the mountpoints in use by a container. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct MountPoint { + /// The mount type: - `bind` a mount of a file or directory from the host into the container. - `volume` a docker volume with the given `Name`. - `tmpfs` a `tmpfs`. - `npipe` a named pipe from the host into the container. - `cluster` a Swarm cluster volume + #[serde(default, rename = "Type")] + pub typ: MountTypeEnum, + + /// Name is the name reference to the underlying data defined by `Source` e.g., the volume name. + #[serde(rename = "Name")] + pub name: Option, + + /// Source location of the mount. For volumes, this contains the storage location of the volume (within `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains the source (host) part of the bind-mount. For `tmpfs` mount points, this field is empty. + #[serde(rename = "Source")] + pub source: Option, + + /// Destination is the path relative to the container root (`/`) where the `Source` is mounted inside the container. 
+ #[serde(rename = "Destination")] + pub destination: Option, + + /// Driver is the volume driver used to create the volume (if it is a volume). + #[serde(rename = "Driver")] + pub driver: Option, + + /// Mode is a comma separated list of options supplied by the user when creating the bind/volume mount. The default is platform-specific (`\"z\"` on Linux, empty on Windows). + #[serde(rename = "Mode")] + pub mode: Option, + + /// Whether the mount is mounted writable (read-write). + #[serde(rename = "RW")] + pub rw: Option, + + /// Propagation describes how mounts are propagated from the host into the mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) for details. This field is not used on Windows. + #[serde(rename = "Propagation")] + pub propagation: Option, +} + +/// NetworkSettings exposes the network settings in the API +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct NetworkSettings { + /// Name of the default bridge interface when dockerd's --bridge flag is set. + #[serde(rename = "Bridge")] + pub bridge: Option, + + /// SandboxID uniquely represents a container's network stack. + #[serde(rename = "SandboxID")] + pub sandbox_id: Option, + + #[serde(default, rename = "Ports")] + pub ports: HashMap>, + + /// SandboxKey is the full path of the netns handle + #[serde(rename = "SandboxKey")] + pub sandbox_key: Option, + + /// Information about all networks that the container is connected to. + #[serde(default, rename = "Networks")] + pub networks: HashMap, +} + +/// Configuration for a network endpoint. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct EndpointSettings { + #[serde(rename = "IPAMConfig")] + pub ipam_config: Option, + + #[serde(default, rename = "Links")] + pub links: Vec, + + /// MAC address for the endpoint on this network. The network driver might ignore this parameter. + #[serde(rename = "MacAddress")] + pub mac_address: Option, + + #[serde(default, rename = "Aliases")] + pub aliases: Vec, + + /// Unique ID of the network. + #[serde(rename = "NetworkID")] + pub network_id: Option, + + /// Unique ID for the service endpoint in a Sandbox. + #[serde(rename = "EndpointID")] + pub endpoint_id: Option, + + /// Gateway address for this network. + #[serde(rename = "Gateway")] + pub gateway: Option, + + /// IPv4 address. + #[serde(rename = "IPAddress")] + pub ip_address: Option, + + /// Mask length of the IPv4 address. + #[serde(rename = "IPPrefixLen")] + pub ip_prefix_len: Option, + + /// IPv6 gateway address. + #[serde(rename = "IPv6Gateway")] + pub ipv6_gateway: Option, + + /// Global IPv6 address. + #[serde(rename = "GlobalIPv6Address")] + pub global_ipv6_address: Option, + + /// Mask length of the global IPv6 address. + #[serde(rename = "GlobalIPv6PrefixLen")] + pub global_ipv6_prefix_len: Option, + + /// DriverOpts is a mapping of driver options and values. These options are passed directly to the driver and are driver specific. + #[serde(default, rename = "DriverOpts")] + pub driver_opts: HashMap, + + /// List of all DNS names an endpoint has on a specific network. This list is based on the container name, network aliases, container short ID, and hostname. These DNS names are non-fully qualified but can contain several dots. You can get fully qualified DNS names by appending `.`. 
For instance, if container name is `my.ctr` and the network is named `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be `my.ctr.testnet`. + #[serde(default, rename = "DNSNames")] + pub dns_names: Vec, +} + +/// EndpointIPAMConfig represents an endpoint's IPAM configuration. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct EndpointIpamConfig { + #[serde(rename = "IPv4Address")] + pub ipv4_address: Option, + + #[serde(rename = "IPv6Address")] + pub ipv6_address: Option, + + #[serde(default, rename = "LinkLocalIPs")] + pub link_local_ips: Vec, +} + +#[typeshare] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ContainerStats { + #[serde(alias = "Name")] + pub name: String, + #[serde(alias = "CPUPerc")] + pub cpu_perc: String, + #[serde(alias = "MemPerc")] + pub mem_perc: String, + #[serde(alias = "MemUsage")] + pub mem_usage: String, + #[serde(alias = "NetIO")] + pub net_io: String, + #[serde(alias = "BlockIO")] + pub block_io: String, + #[serde(alias = "PIDs")] + pub pids: String, +} diff --git a/client/core/rs/src/entities/docker/image.rs b/client/core/rs/src/entities/docker/image.rs new file mode 100644 index 000000000..5597448ec --- /dev/null +++ b/client/core/rs/src/entities/docker/image.rs @@ -0,0 +1,147 @@ +use serde::{Deserialize, Serialize}; +use typeshare::typeshare; + +use crate::entities::I64; + +use super::{ContainerConfig, GraphDriverData}; + +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ImageListItem { + /// The first tag in `repo_tags`, or Id if no tags. + pub name: String, + /// ID is the content-addressable ID of an image. This identifier is a content-addressable digest calculated from the image's configuration (which includes the digests of layers used by the image). Note that this digest differs from the `RepoDigests` below, which holds digests of image manifests that reference the image. + pub id: String, + /// ID of the parent image. Depending on how the image was created, this field may be empty and is only set for images that were built/created locally. This field is empty if the image was pulled from an image registry. + pub parent_id: String, + /// Date and time at which the image was created as a Unix timestamp (number of seconds sinds EPOCH). + pub created: I64, + /// Total size of the image including all layers it is composed of. + pub size: I64, + /// Whether the image is in use by any container + pub in_use: bool, +} + +/// Information about an image in the local image cache. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct Image { + /// ID is the content-addressable ID of an image. This identifier is a content-addressable digest calculated from the image's configuration (which includes the digests of layers used by the image). Note that this digest differs from the `RepoDigests` below, which holds digests of image manifests that reference the image. + #[serde(rename = "Id")] + pub id: Option, + + /// List of image names/tags in the local image cache that reference this image. Multiple image tags can refer to the same image, and this list may be empty if no tags reference the image, in which case the image is \"untagged\", in which case it can still be referenced by its ID. + #[serde(default, rename = "RepoTags")] + pub repo_tags: Vec, + + /// List of content-addressable digests of locally available image manifests that the image is referenced from. 
Multiple manifests can refer to the same image. These digests are usually only available if the image was either pulled from a registry, or if the image was pushed to a registry, which is when the manifest is generated and its digest calculated. + #[serde(default, rename = "RepoDigests")] + pub repo_digests: Vec, + + /// ID of the parent image. Depending on how the image was created, this field may be empty and is only set for images that were built/created locally. This field is empty if the image was pulled from an image registry. + #[serde(rename = "Parent")] + pub parent: Option, + + /// Optional message that was set when committing or importing the image. + #[serde(rename = "Comment")] + pub comment: Option, + + /// Date and time at which the image was created, formatted in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. This information is only available if present in the image, and omitted otherwise. + #[serde(rename = "Created")] + pub created: Option, + + /// The version of Docker that was used to build the image. Depending on how the image was created, this field may be empty. + #[serde(rename = "DockerVersion")] + pub docker_version: Option, + + /// Name of the author that was specified when committing the image, or as specified through MAINTAINER (deprecated) in the Dockerfile. + #[serde(rename = "Author")] + pub author: Option, + + /// Configuration for a container that is portable between hosts. + #[serde(rename = "Config")] + #[serde(skip_serializing_if = "Option::is_none")] + pub config: Option, + + /// Hardware CPU architecture that the image runs on. + #[serde(rename = "Architecture")] + pub architecture: Option, + + /// CPU architecture variant (presently ARM-only). + #[serde(rename = "Variant")] + pub variant: Option, + + /// Operating System the image is built to run on. + #[serde(rename = "Os")] + pub os: Option, + + /// Operating System version the image is built to run on (especially for Windows). + #[serde(rename = "OsVersion")] + pub os_version: Option, + + /// Total size of the image including all layers it is composed of. + #[serde(rename = "Size")] + pub size: Option, + + #[serde(rename = "GraphDriver")] + pub graph_driver: Option, + + #[serde(rename = "RootFS")] + pub root_fs: Option, + + #[serde(rename = "Metadata")] + pub metadata: Option, +} + +/// Information about the image's RootFS, including the layer IDs. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ImageInspectRootFs { + #[serde(default, rename = "Type")] + pub typ: String, + + #[serde(default, rename = "Layers")] + pub layers: Vec, +} + +/// Additional metadata of the image in the local cache. This information is local to the daemon, and not part of the image itself. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ImageInspectMetadata { + /// Date and time at which the image was last tagged in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. This information is only available if the image was tagged locally, and omitted otherwise. 
+ #[serde(rename = "LastTagTime")] + pub last_tag_time: Option, +} + +/// individual image layer information in response to ImageHistory operation +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ImageHistoryResponseItem { + #[serde(rename = "Id")] + pub id: String, + + #[serde(rename = "Created")] + pub created: I64, + + #[serde(rename = "CreatedBy")] + pub created_by: String, + + #[serde(default, rename = "Tags")] + pub tags: Vec, + + #[serde(rename = "Size")] + pub size: I64, + + #[serde(rename = "Comment")] + pub comment: String, +} diff --git a/client/core/rs/src/entities/docker/mod.rs b/client/core/rs/src/entities/docker/mod.rs new file mode 100644 index 000000000..a868af677 --- /dev/null +++ b/client/core/rs/src/entities/docker/mod.rs @@ -0,0 +1,177 @@ +use std::collections::HashMap; + +use serde::{Deserialize, Serialize}; +use typeshare::typeshare; + +use super::I64; + +pub mod container; +pub mod image; +pub mod network; +pub mod volume; + +/// PortBinding represents a binding between a host IP address and a host port. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct PortBinding { + /// Host IP address that the container's port is mapped to. + #[serde(rename = "HostIp")] + pub host_ip: Option, + + /// Host port number that the container's port is mapped to. + #[serde(rename = "HostPort")] + pub host_port: Option, +} + +/// Information about the storage driver used to store the container's and image's filesystem. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct GraphDriverData { + /// Name of the storage driver. + #[serde(default, rename = "Name")] + pub name: String, + /// Low-level storage metadata, provided as key/value pairs. This information is driver-specific, and depends on the storage-driver in use, and should be used for informational purposes only. + #[serde(default, rename = "Data")] + pub data: HashMap, +} + +/// Configuration for a container that is portable between hosts. When used as `ContainerConfig` field in an image, `ContainerConfig` is an optional field containing the configuration of the container that was last committed when creating the image. Previous versions of Docker builder used this field to store build cache, and it is not in active use anymore. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ContainerConfig { + /// The hostname to use for the container, as a valid RFC 1123 hostname. + #[serde(rename = "Hostname")] + pub hostname: Option, + + /// The domain name to use for the container. + #[serde(rename = "Domainname")] + pub domainname: Option, + + /// The user that commands are run as inside the container. + #[serde(rename = "User")] + pub user: Option, + + /// Whether to attach to `stdin`. + #[serde(rename = "AttachStdin")] + pub attach_stdin: Option, + + /// Whether to attach to `stdout`. + #[serde(rename = "AttachStdout")] + pub attach_stdout: Option, + + /// Whether to attach to `stderr`. + #[serde(rename = "AttachStderr")] + pub attach_stderr: Option, + + /// An object mapping ports to an empty object in the form: `{\"/\": {}}` + #[serde(default, rename = "ExposedPorts")] + pub exposed_ports: HashMap>, + + /// Attach standard streams to a TTY, including `stdin` if it is not closed. 
+ #[serde(rename = "Tty")] + pub tty: Option, + + /// Open `stdin` + #[serde(rename = "OpenStdin")] + pub open_stdin: Option, + + /// Close `stdin` after one attached client disconnects + #[serde(rename = "StdinOnce")] + pub stdin_once: Option, + + /// A list of environment variables to set inside the container in the form `[\"VAR=value\", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. + #[serde(default, rename = "Env")] + pub env: Vec, + + /// Command to run specified as a string or an array of strings. + #[serde(default, rename = "Cmd")] + pub cmd: Vec, + + #[serde(rename = "Healthcheck")] + pub healthcheck: Option, + + /// Command is already escaped (Windows only) + #[serde(rename = "ArgsEscaped")] + pub args_escaped: Option, + + /// The name (or reference) of the image to use when creating the container, or which was used when the container was created. + #[serde(rename = "Image")] + pub image: Option, + + /// An object mapping mount point paths inside the container to empty objects. + #[serde(default, rename = "Volumes")] + pub volumes: HashMap>, + + /// The working directory for commands to run in. + #[serde(rename = "WorkingDir")] + pub working_dir: Option, + + /// The entry point for the container as a string or an array of strings. If the array consists of exactly one empty string (`[\"\"]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + #[serde(default, rename = "Entrypoint")] + pub entrypoint: Vec, + + /// Disable networking for the container. + #[serde(rename = "NetworkDisabled")] + pub network_disabled: Option, + + /// MAC address of the container. Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. + #[serde(rename = "MacAddress")] + pub mac_address: Option, + + /// `ONBUILD` metadata that were defined in the image's `Dockerfile`. + #[serde(default, rename = "OnBuild")] + pub on_build: Vec, + + /// User-defined key/value metadata. + #[serde(default, rename = "Labels")] + pub labels: HashMap, + + /// Signal to stop a container as a string or unsigned integer. + #[serde(rename = "StopSignal")] + pub stop_signal: Option, + + /// Timeout to stop a container in seconds. + #[serde(rename = "StopTimeout")] + pub stop_timeout: Option, + + /// Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + #[serde(default, rename = "Shell")] + pub shell: Vec, +} + +/// A test to perform to check that the container is healthy. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct HealthConfig { + /// The test to perform. Possible values are: - `[]` inherit healthcheck from image or parent image - `[\"NONE\"]` disable healthcheck - `[\"CMD\", args...]` exec arguments directly - `[\"CMD-SHELL\", command]` run command with system's default shell + #[serde(default, rename = "Test")] + pub test: Vec, + + /// The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. + #[serde(rename = "Interval")] + pub interval: Option, + + /// The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. + #[serde(rename = "Timeout")] + pub timeout: Option, + + /// The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit. 
+ #[serde(rename = "Retries")] + pub retries: Option, + + /// Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. + #[serde(rename = "StartPeriod")] + pub start_period: Option, + + /// The time to wait between checks in nanoseconds during the start period. It should be 0 or at least 1000000 (1 ms). 0 means inherit. + #[serde(rename = "StartInterval")] + pub start_interval: Option, +} \ No newline at end of file diff --git a/client/core/rs/src/entities/docker/network.rs b/client/core/rs/src/entities/docker/network.rs new file mode 100644 index 000000000..e60a51c12 --- /dev/null +++ b/client/core/rs/src/entities/docker/network.rs @@ -0,0 +1,114 @@ +use std::collections::HashMap; + +use serde::{Deserialize, Serialize}; +use typeshare::typeshare; + +#[typeshare] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NetworkListItem { + pub name: Option, + pub id: Option, + pub created: Option, + pub scope: Option, + pub driver: Option, + pub enable_ipv6: Option, + pub ipam_driver: Option, + pub ipam_subnet: Option, + pub ipam_gateway: Option, + pub internal: Option, + pub attachable: Option, + pub ingress: Option, + /// Whether the network is attached to one or more containers + pub in_use: bool, +} + +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct Network { + #[serde(rename = "Name")] + pub name: Option, + + #[serde(rename = "Id")] + pub id: Option, + + #[serde(rename = "Created")] + pub created: Option, + + #[serde(rename = "Scope")] + pub scope: Option, + + #[serde(rename = "Driver")] + pub driver: Option, + + #[serde(rename = "EnableIPv6")] + pub enable_ipv6: Option, + + #[serde(rename = "IPAM")] + pub ipam: Option, + + #[serde(rename = "Internal")] + pub internal: Option, + + #[serde(rename = "Attachable")] + pub attachable: Option, + + #[serde(rename = "Ingress")] + pub ingress: Option, + + /// This field is turned from map into array for easier usability. + #[serde(rename = "Containers")] + pub containers: Vec, + + #[serde(default, rename = "Options")] + pub options: HashMap, + + #[serde(default, rename = "Labels")] + pub labels: HashMap, +} + +#[typeshare] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Ipam { + /// Name of the IPAM driver to use. + #[serde(rename = "Driver")] + pub driver: Option, + /// List of IPAM configuration options, specified as a map: ``` {\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": } ``` + #[serde(rename = "Config")] + pub config: Vec, + /// Driver-specific options, specified as a map. 
+ #[serde(rename = "Options")] + pub options: HashMap, +} + +#[typeshare] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct IpamConfig { + #[serde(rename = "Subnet")] + pub subnet: Option, + #[serde(rename = "IPRange")] + pub ip_range: Option, + #[serde(rename = "Gateway")] + pub gateway: Option, + #[serde(rename = "AuxiliaryAddresses")] + pub auxiliary_addresses: HashMap, +} + +#[typeshare] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct NetworkContainer { + /// This is the key on the incoming map of NetworkContainer + #[serde(default, rename = "ContainerID")] + pub container_id: String, + #[serde(rename = "Name")] + pub name: Option, + #[serde(rename = "EndpointID")] + pub endpoint_id: Option, + #[serde(rename = "MacAddress")] + pub mac_address: Option, + #[serde(rename = "IPv4Address")] + pub ipv4_address: Option, + #[serde(rename = "IPv6Address")] + pub ipv6_address: Option, +} diff --git a/client/core/rs/src/entities/docker/volume.rs b/client/core/rs/src/entities/docker/volume.rs new file mode 100644 index 000000000..25001043b --- /dev/null +++ b/client/core/rs/src/entities/docker/volume.rs @@ -0,0 +1,383 @@ +use std::collections::HashMap; + +use serde::{Deserialize, Serialize}; +use typeshare::typeshare; + +use crate::entities::{I64, U64}; + +use super::PortBinding; + +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct VolumeListItem { + /// The name of the volume + pub name: String, + pub driver: String, + pub mountpoint: String, + pub created: Option, + pub scope: VolumeScopeEnum, + /// Amount of disk space used by the volume (in bytes). This information is only available for volumes created with the `\"local\"` volume driver. For volumes created with other volume drivers, this field is set to `-1` (\"not available\") + pub size: Option, + /// Whether the volume is currently attached to any container + pub in_use: bool, +} + +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct Volume { + /// Name of the volume. + #[serde(rename = "Name")] + pub name: String, + + /// Name of the volume driver used by the volume. + #[serde(rename = "Driver")] + pub driver: String, + + /// Mount path of the volume on the host. + #[serde(rename = "Mountpoint")] + pub mountpoint: String, + + /// Date/Time the volume was created. + #[serde(rename = "CreatedAt")] + pub created_at: Option, + + /// Low-level details about the volume, provided by the volume driver. Details are returned as a map with key/value pairs: `{\"key\":\"value\",\"key2\":\"value2\"}`. The `Status` field is optional, and is omitted if the volume driver does not support this feature. + #[serde(default, rename = "Status")] + pub status: HashMap>, + + /// User-defined key/value metadata. + #[serde(default, rename = "Labels")] + pub labels: HashMap, + + /// The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. + #[serde(default, rename = "Scope")] + pub scope: VolumeScopeEnum, + + #[serde(rename = "ClusterVolume")] + pub cluster_volume: Option, + + /// The driver specific options used when creating the volume. 
+ #[serde(default, rename = "Options")] + pub options: HashMap, + + #[serde(rename = "UsageData")] + pub usage_data: Option, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Serialize, + Deserialize, + Eq, + Ord, + Default, +)] +pub enum VolumeScopeEnum { + #[default] + #[serde(rename = "")] + Empty, + #[serde(rename = "local")] + Local, + #[serde(rename = "global")] + Global, +} + +/// Options and information specific to, and only present on, Swarm CSI cluster volumes. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ClusterVolume { + /// The Swarm ID of this volume. Because cluster volumes are Swarm objects, they have an ID, unlike non-cluster volumes. This ID can be used to refer to the Volume instead of the name. + #[serde(rename = "ID")] + pub id: Option, + + #[serde(rename = "Version")] + pub version: Option, + + #[serde(rename = "CreatedAt")] + pub created_at: Option, + + #[serde(rename = "UpdatedAt")] + pub updated_at: Option, + + #[serde(rename = "Spec")] + pub spec: Option, + + #[serde(rename = "Info")] + pub info: Option, + + /// The status of the volume as it pertains to its publishing and use on specific nodes + #[serde(default, rename = "PublishStatus")] + pub publish_status: Vec, +} + +/// Information about the global status of the volume. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ClusterVolumeInfo { + /// The capacity of the volume in bytes. A value of 0 indicates that the capacity is unknown. + #[serde(rename = "CapacityBytes")] + pub capacity_bytes: Option, + + /// A map of strings to strings returned from the storage plugin when the volume is created. + #[serde(default, rename = "VolumeContext")] + pub volume_context: HashMap, + + /// The ID of the volume as returned by the CSI storage plugin. This is distinct from the volume's ID as provided by Docker. This ID is never used by the user when communicating with Docker to refer to this volume. If the ID is blank, then the Volume has not been successfully created in the plugin yet. + #[serde(rename = "VolumeID")] + pub volume_id: Option, + + /// The topology this volume is actually accessible from. + #[serde(default, rename = "AccessibleTopology")] + pub accessible_topology: Vec, +} + +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ClusterVolumePublishStatus { + /// The ID of the Swarm node the volume is published on. + #[serde(rename = "NodeID")] + pub node_id: Option, + + /// The published state of the volume. * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. * `published` The volume is published successfully to the node. * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + #[serde(default, rename = "State")] + pub state: ClusterVolumePublishStatusStateEnum, + + /// A map of strings to strings returned by the CSI controller plugin when a volume is published. 
+ #[serde(default, rename = "PublishContext")] + pub publish_context: HashMap, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Serialize, + Deserialize, + Eq, + Ord, + Default, +)] +pub enum ClusterVolumePublishStatusStateEnum { + #[default] + #[serde(rename = "")] + Empty, + #[serde(rename = "pending-publish")] + PendingPublish, + #[serde(rename = "published")] + Published, + #[serde(rename = "pending-node-unpublish")] + PendingNodeUnpublish, + #[serde(rename = "pending-controller-unpublish")] + PendingControllerUnpublish, +} + +/// The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. The client must send the version number along with the modified specification when updating these objects. This approach ensures safe concurrency and determinism in that the change on the object may not be applied if the version number has changed from the last read. In other words, if two update requests specify the same base version, only one of the requests can succeed. As a result, two separate update requests that happen at the same time will not unintentionally overwrite each other. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ObjectVersion { + #[serde(rename = "Index")] + pub index: Option, +} + +/// Cluster-specific options used to create the volume. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ClusterVolumeSpec { + /// Group defines the volume group of this volume. Volumes belonging to the same group can be referred to by group name when creating Services. Referring to a volume by group instructs Swarm to treat volumes in that group interchangeably for the purpose of scheduling. Volumes with an empty string for a group technically all belong to the same, emptystring group. + #[serde(rename = "Group")] + pub group: Option, + + #[serde(rename = "AccessMode")] + pub access_mode: Option, +} + +/// Defines how the volume is used by tasks. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ClusterVolumeSpecAccessMode { + /// The set of nodes this volume can be used on at one time. - `single` The volume may only be scheduled to one node at a time. - `multi` the volume may be scheduled to any supported number of nodes at a time. + #[serde(default, rename = "Scope")] + pub scope: ClusterVolumeSpecAccessModeScopeEnum, + + /// The number and way that different tasks can use this volume at one time. - `none` The volume may only be used by one task at a time. - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. - `all` The volume may have any number of readers and writers. + #[serde(default, rename = "Sharing")] + pub sharing: ClusterVolumeSpecAccessModeSharingEnum, + + /// Swarm Secrets that are passed to the CSI storage plugin when operating on this volume. + #[serde(default, rename = "Secrets")] + pub secrets: Vec, + + #[serde(rename = "AccessibilityRequirements")] + pub accessibility_requirements: + Option, + + #[serde(rename = "CapacityRange")] + pub capacity_range: + Option, + + /// The availability of the volume for use in tasks. - `active` The volume is fully available for scheduling on the cluster - `pause` No new workloads should use the volume, but existing workloads are not stopped. 
- `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + #[serde(default, rename = "Availability")] + pub availability: ClusterVolumeSpecAccessModeAvailabilityEnum, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Serialize, + Deserialize, + Eq, + Ord, + Default, +)] +pub enum ClusterVolumeSpecAccessModeScopeEnum { + #[default] + #[serde(rename = "")] + Empty, + #[serde(rename = "single")] + Single, + #[serde(rename = "multi")] + Multi, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Serialize, + Deserialize, + Eq, + Ord, + Default, +)] +pub enum ClusterVolumeSpecAccessModeSharingEnum { + #[default] + #[serde(rename = "")] + Empty, + #[serde(rename = "none")] + None, + #[serde(rename = "readonly")] + Readonly, + #[serde(rename = "onewriter")] + Onewriter, + #[serde(rename = "all")] + All, +} + +/// One cluster volume secret entry. Defines a key-value pair that is passed to the plugin. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ClusterVolumeSpecAccessModeSecrets { + /// Key is the name of the key of the key-value pair passed to the plugin. + #[serde(rename = "Key")] + pub key: Option, + + /// Secret is the swarm Secret object from which to read data. This can be a Secret name or ID. The Secret data is retrieved by swarm and used as the value of the key-value pair passed to the plugin. + #[serde(rename = "Secret")] + pub secret: Option, +} + +/// Requirements for the accessible topology of the volume. These fields are optional. For an in-depth description of what these fields mean, see the CSI specification. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ClusterVolumeSpecAccessModeAccessibilityRequirements { + /// A list of required topologies, at least one of which the volume must be accessible from. + #[serde(default, rename = "Requisite")] + pub requisite: Vec, + + /// A list of topologies that the volume should attempt to be provisioned in. + #[serde(default, rename = "Preferred")] + pub preferred: Vec, +} + +#[typeshare] +pub type Topology = HashMap>; + +/// The desired capacity that the volume should be created with. If empty, the plugin will decide the capacity. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct ClusterVolumeSpecAccessModeCapacityRange { + /// The volume must be at least this big. The value of 0 indicates an unspecified minimum + #[serde(rename = "RequiredBytes")] + pub required_bytes: Option, + + /// The volume must not be bigger than this. The value of 0 indicates an unspecified maximum. + #[serde(rename = "LimitBytes")] + pub limit_bytes: Option, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + PartialEq, + PartialOrd, + Serialize, + Deserialize, + Eq, + Ord, + Default, +)] +pub enum ClusterVolumeSpecAccessModeAvailabilityEnum { + #[default] + #[serde(rename = "")] + Empty, + #[serde(rename = "active")] + Active, + #[serde(rename = "pause")] + Pause, + #[serde(rename = "drain")] + Drain, +} + +/// Usage details about the volume. This information is used by the `GET /system/df` endpoint, and omitted in other endpoints. +#[typeshare] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, +)] +pub struct VolumeUsageData { + /// Amount of disk space used by the volume (in bytes). 
This information is only available for volumes created with the `\"local\"` volume driver. For volumes created with other volume drivers, this field is set to `-1` (\"not available\") + #[serde(rename = "Size")] + pub size: I64, + + /// The number of containers referencing this volume. This field is set to `-1` if the reference-count is not available. + #[serde(rename = "RefCount")] + pub ref_count: I64, +} diff --git a/client/core/rs/src/entities/logger.rs b/client/core/rs/src/entities/logger.rs index a3c6f3b1c..4df19cda6 100644 --- a/client/core/rs/src/entities/logger.rs +++ b/client/core/rs/src/entities/logger.rs @@ -18,7 +18,7 @@ pub struct LogConfig { } fn default_opentelemetry_service_name() -> String { - String::from("Monitor") + String::from("Komodo") } impl Default for LogConfig { diff --git a/client/core/rs/src/entities/mod.rs b/client/core/rs/src/entities/mod.rs index a1ce6b226..633953ca1 100644 --- a/client/core/rs/src/entities/mod.rs +++ b/client/core/rs/src/entities/mod.rs @@ -6,6 +6,7 @@ use build::StandardRegistryConfig; use clap::Parser; use config::core::AwsEcrConfig; use derive_empty_traits::EmptyTraits; +use derive_variants::{EnumVariants, ExtractVariant}; use serde::{ de::{ value::{MapAccessDeserializer, SeqAccessDeserializer}, @@ -31,6 +32,8 @@ pub mod builder; pub mod config; /// Subtypes of [Deployment][deployment::Deployment]. pub mod deployment; +/// Networks, Images, Containers. +pub mod docker; /// Subtypes of [LogConfig][logger::LogConfig]. pub mod logger; /// Subtypes of [Permission][permission::Permission]. @@ -49,6 +52,8 @@ pub mod server; pub mod server_template; /// Subtypes of [Stack][stack::Stack] pub mod stack; +/// Subtypes for server stats reporting. +pub mod stats; /// Subtypes of [ResourceSync][sync::ResourceSync] pub mod sync; /// Subtypes of [Tag][tag::Tag]. 
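Aside from the rename pass itself, the bulk of the new `entities::docker` types above are thin serde mirrors of Docker Engine API objects: PascalCase wire keys mapped onto snake_case Rust fields with `#[serde(rename)]`, `#[serde(default)]` soaking up keys the daemon may omit, and `Option` for objects that only appear on some endpoints (e.g. `UsageData` on `GET /system/df` style responses). A minimal, self-contained sketch of that pattern follows; `VolumeSketch` and `VolumeUsageSketch` are illustrative stand-ins, not the crate's own definitions:

```rust
// Sketch of the serde pattern used by the docker entity types in this patch.
// Assumes serde (with the "derive" feature) and serde_json as dependencies.
use std::collections::HashMap;

use serde::{Deserialize, Serialize};

#[derive(Debug, Default, Serialize, Deserialize)]
struct VolumeUsageSketch {
  /// Size in bytes; drivers that don't report it use -1.
  #[serde(rename = "Size")]
  size: i64,
  /// Number of containers referencing the volume; -1 when unknown.
  #[serde(rename = "RefCount")]
  ref_count: i64,
}

#[derive(Debug, Default, Serialize, Deserialize)]
struct VolumeSketch {
  #[serde(rename = "Name")]
  name: String,
  /// A missing "Options" key deserializes to an empty map instead of erroring.
  #[serde(default, rename = "Options")]
  options: HashMap<String, String>,
  /// "UsageData" is only present on some endpoints, hence Option.
  #[serde(rename = "UsageData")]
  usage_data: Option<VolumeUsageSketch>,
}

fn main() -> serde_json::Result<()> {
  // "Options" is intentionally omitted to show the #[serde(default)] behavior.
  let payload =
    r#"{ "Name": "db-data", "UsageData": { "Size": 1024, "RefCount": 1 } }"#;
  let volume: VolumeSketch = serde_json::from_str(payload)?;
  println!("{volume:?}");
  Ok(())
}
```

The same shape applies to the network, image, and container list types added elsewhere in this patch.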
@@ -68,6 +73,8 @@ pub mod variable; pub type I64 = i64; #[typeshare(serialized_as = "number")] pub type U64 = u64; +#[typeshare(serialized_as = "number")] +pub type Usize = usize; #[typeshare(serialized_as = "any")] pub type MongoDocument = bson::Document; #[typeshare(serialized_as = "any")] @@ -127,9 +134,9 @@ pub fn get_image_name( aws_ecr: impl FnOnce(&String) -> Option, ) -> anyhow::Result { let name = if image_name.is_empty() { - to_monitor_name(name) + to_komodo_name(name) } else { - to_monitor_name(image_name) + to_komodo_name(image_name) }; let name = match image_registry { build::ImageRegistry::None(_) => name, @@ -159,11 +166,11 @@ pub fn get_image_name( Ok(name) } -pub fn to_monitor_name(name: &str) -> String { +pub fn to_komodo_name(name: &str) -> String { name.to_lowercase().replace([' ', '.'], "_") } -pub fn monitor_timestamp() -> i64 { +pub fn komodo_timestamp() -> i64 { unix_timestamp_ms() as i64 } @@ -710,12 +717,26 @@ pub enum Operation { UpdateServer, DeleteServer, RenameServer, - PruneImages, + StartContainer, + RestartContainer, + PauseContainer, + UnpauseContainer, + StopContainer, + DestroyContainer, + StartAllContainers, + RestartAllContainers, + PauseAllContainers, + UnpauseAllContainers, + StopAllContainers, PruneContainers, - PruneNetworks, CreateNetwork, DeleteNetwork, - StopAllContainers, + PruneNetworks, + DeleteImage, + PruneImages, + DeleteVolume, + PruneVolumes, + PruneSystem, // build CreateBuild, @@ -734,12 +755,12 @@ pub enum Operation { UpdateDeployment, DeleteDeployment, Deploy, - StartContainer, - RestartContainer, - PauseContainer, - UnpauseContainer, - StopContainer, - RemoveContainer, + StartDeployment, + RestartDeployment, + PauseDeployment, + UnpauseDeployment, + StopDeployment, + DestroyDeployment, RenameDeployment, // repo @@ -830,3 +851,164 @@ pub enum SearchCombinator { Or, And, } + +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + PartialEq, + Hash, + Eq, + Clone, + Copy, + Default, + Display, + EnumString, +)] +#[serde(rename_all = "UPPERCASE")] +#[strum(serialize_all = "UPPERCASE")] +pub enum TerminationSignal { + #[serde(alias = "1")] + SigHup, + #[serde(alias = "2")] + SigInt, + #[serde(alias = "3")] + SigQuit, + #[default] + #[serde(alias = "15")] + SigTerm, +} + +/// Used to reference a specific resource across all resource types +#[typeshare] +#[derive( + Debug, + Clone, + PartialEq, + Eq, + Hash, + Serialize, + Deserialize, + EnumVariants, +)] +#[variant_derive( + Debug, + Clone, + Copy, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + Display, + EnumString, + AsRefStr +)] +#[serde(tag = "type", content = "id")] +pub enum ResourceTarget { + System(String), + Build(String), + Builder(String), + Deployment(String), + Server(String), + Repo(String), + Alerter(String), + Procedure(String), + ServerTemplate(String), + ResourceSync(String), + Stack(String), +} + +impl ResourceTarget { + pub fn extract_variant_id( + &self, + ) -> (ResourceTargetVariant, &String) { + let id = match &self { + ResourceTarget::System(id) => id, + ResourceTarget::Build(id) => id, + ResourceTarget::Builder(id) => id, + ResourceTarget::Deployment(id) => id, + ResourceTarget::Server(id) => id, + ResourceTarget::Repo(id) => id, + ResourceTarget::Alerter(id) => id, + ResourceTarget::Procedure(id) => id, + ResourceTarget::ServerTemplate(id) => id, + ResourceTarget::ResourceSync(id) => id, + ResourceTarget::Stack(id) => id, + }; + (self.extract_variant(), id) + } + + pub fn system() -> ResourceTarget { + 
Self::System("system".to_string()) + } +} + +impl Default for ResourceTarget { + fn default() -> Self { + ResourceTarget::system() + } +} + +impl From<&build::Build> for ResourceTarget { + fn from(build: &build::Build) -> Self { + Self::Build(build.id.clone()) + } +} + +impl From<&deployment::Deployment> for ResourceTarget { + fn from(deployment: &deployment::Deployment) -> Self { + Self::Deployment(deployment.id.clone()) + } +} + +impl From<&server::Server> for ResourceTarget { + fn from(server: &server::Server) -> Self { + Self::Server(server.id.clone()) + } +} + +impl From<&repo::Repo> for ResourceTarget { + fn from(repo: &repo::Repo) -> Self { + Self::Repo(repo.id.clone()) + } +} + +impl From<&builder::Builder> for ResourceTarget { + fn from(builder: &builder::Builder) -> Self { + Self::Builder(builder.id.clone()) + } +} + +impl From<&alerter::Alerter> for ResourceTarget { + fn from(alerter: &alerter::Alerter) -> Self { + Self::Alerter(alerter.id.clone()) + } +} + +impl From<&procedure::Procedure> for ResourceTarget { + fn from(procedure: &procedure::Procedure) -> Self { + Self::Procedure(procedure.id.clone()) + } +} + +impl From<&server_template::ServerTemplate> for ResourceTarget { + fn from(server_template: &server_template::ServerTemplate) -> Self { + Self::ServerTemplate(server_template.id.clone()) + } +} + +impl From<&sync::ResourceSync> for ResourceTarget { + fn from(resource_sync: &sync::ResourceSync) -> Self { + Self::ResourceSync(resource_sync.id.clone()) + } +} + +impl From<&stack::Stack> for ResourceTarget { + fn from(resource_sync: &stack::Stack) -> Self { + Self::Stack(resource_sync.id.clone()) + } +} diff --git a/client/core/rs/src/entities/permission.rs b/client/core/rs/src/entities/permission.rs index d09f78254..17918fb9a 100644 --- a/client/core/rs/src/entities/permission.rs +++ b/client/core/rs/src/entities/permission.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use strum::{AsRefStr, Display, EnumString}; use typeshare::typeshare; -use super::{update::ResourceTarget, MongoId}; +use super::{ResourceTarget, MongoId}; /// Representation of a User or UserGroups permission on a resource. #[typeshare] diff --git a/client/core/rs/src/entities/repo.rs b/client/core/rs/src/entities/repo.rs index 968fb69dd..b298e0410 100644 --- a/client/core/rs/src/entities/repo.rs +++ b/client/core/rs/src/entities/repo.rs @@ -113,7 +113,7 @@ pub struct RepoConfig { /// Whether to use https to clone the repo (versus http). Default: true /// - /// Note. Monitor does not currently support cloning repos via ssh. + /// Note. Komodo does not currently support cloning repos via ssh. 
#[serde(default = "default_git_https")] #[builder(default = "default_git_https()")] #[partial_default(default_git_https())] diff --git a/client/core/rs/src/entities/resource.rs b/client/core/rs/src/entities/resource.rs index 6a9da84c7..8bdbc9bde 100644 --- a/client/core/rs/src/entities/resource.rs +++ b/client/core/rs/src/entities/resource.rs @@ -6,9 +6,7 @@ use typeshare::typeshare; use crate::entities::{MongoId, I64}; -use super::{ - permission::PermissionLevel, update::ResourceTargetVariant, -}; +use super::{permission::PermissionLevel, ResourceTargetVariant}; #[typeshare] #[derive(Debug, Clone, Serialize, Deserialize, Builder)] diff --git a/client/core/rs/src/entities/server/mod.rs b/client/core/rs/src/entities/server.rs similarity index 89% rename from client/core/rs/src/entities/server/mod.rs rename to client/core/rs/src/entities/server.rs index 7090f3041..373f22998 100644 --- a/client/core/rs/src/entities/server/mod.rs +++ b/client/core/rs/src/entities/server.rs @@ -1,16 +1,15 @@ +use std::{collections::HashMap, path::PathBuf}; + use derive_builder::Builder; use partial_derive2::Partial; use serde::{Deserialize, Serialize}; use typeshare::typeshare; -use super::resource::{ - AddFilters, Resource, ResourceListItem, ResourceQuery, +use super::{ + alert::SeverityLevel, + resource::{AddFilters, Resource, ResourceListItem, ResourceQuery}, }; -pub mod docker_image; -pub mod docker_network; -pub mod stats; - #[typeshare] pub type Server = Resource; @@ -218,6 +217,15 @@ impl Default for ServerConfig { } } +/// Summary of the health of the server. +#[typeshare] +#[derive(Serialize, Deserialize, Default, Debug, Clone)] +pub struct ServerHealth { + pub cpu: SeverityLevel, + pub mem: SeverityLevel, + pub disks: HashMap, +} + /// Current pending actions on the server. #[typeshare] #[derive(Serialize, Deserialize, Debug, Clone, Copy, Default)] @@ -228,7 +236,19 @@ pub struct ServerActionState { pub pruning_containers: bool, /// Server currently pruning images pub pruning_images: bool, - /// Server currently stopping all containers. + /// Server currently pruning images + pub pruning_volumes: bool, + /// Server currently pruning system + pub pruning_system: bool, + /// Server currently starting containers. + pub starting_containers: bool, + /// Server currently restarting containers. + pub restarting_containers: bool, + /// Server currently pausing containers. + pub pausing_containers: bool, + /// Server currently unpausing containers. + pub unpausing_containers: bool, + /// Server currently stopping containers. pub stopping_containers: bool, } diff --git a/client/core/rs/src/entities/server/docker_image.rs b/client/core/rs/src/entities/server/docker_image.rs deleted file mode 100644 index 509045df2..000000000 --- a/client/core/rs/src/entities/server/docker_image.rs +++ /dev/null @@ -1,98 +0,0 @@ -use std::collections::HashMap; - -use serde::{ - de::DeserializeOwned, Deserialize, Deserializer, Serialize, -}; -use typeshare::typeshare; - -use crate::entities::I64; - -/// Summary of a docker image cached on a server -#[typeshare] -#[derive( - Debug, Clone, Default, PartialEq, Serialize, Deserialize, -)] -pub struct ImageSummary { - /// ID is the content-addressable ID of an image. This identifier is a content-addressable digest calculated from the image's configuration (which includes the digests of layers used by the image). Note that this digest differs from the `RepoDigests` below, which holds digests of image manifests that reference the image. 
- #[serde(rename = "Id")] - pub id: String, - - /// ID of the parent image. Depending on how the image was created, this field may be empty and is only set for images that were built/created locally. This field is empty if the image was pulled from an image registry. - #[serde(rename = "ParentId")] - pub parent_id: String, - - /// List of image names/tags in the local image cache that reference this image. Multiple image tags can refer to the same image, and this list may be empty if no tags reference the image, in which case the image is \"untagged\", in which case it can still be referenced by its ID. - #[serde(rename = "RepoTags")] - #[serde(deserialize_with = "deserialize_nonoptional_vec")] - pub repo_tags: Vec, - - /// List of content-addressable digests of locally available image manifests that the image is referenced from. Multiple manifests can refer to the same image. These digests are usually only available if the image was either pulled from a registry, or if the image was pushed to a registry, which is when the manifest is generated and its digest calculated. - #[serde(rename = "RepoDigests")] - #[serde(deserialize_with = "deserialize_nonoptional_vec")] - pub repo_digests: Vec, - - /// Date and time at which the image was created as a Unix timestamp (number of seconds sinds EPOCH). - #[serde(rename = "Created")] - pub created: I64, - - /// Total size of the image including all layers it is composed of. - #[serde(rename = "Size")] - pub size: I64, - - /// Total size of image layers that are shared between this image and other images. This size is not calculated by default. `-1` indicates that the value has not been set / calculated. - #[serde(rename = "SharedSize")] - pub shared_size: I64, - - /// Total size of the image including all layers it is composed of. In versions of Docker before v1.10, this field was calculated from the image itself and all of its parent images. Docker v1.10 and up store images self-contained, and no longer use a parent-chain, making this field an equivalent of the Size field. This field is kept for backward compatibility, but may be removed in a future version of the API. - #[serde(rename = "VirtualSize")] - pub virtual_size: Option, - - /// User-defined key/value metadata. - #[serde(rename = "Labels")] - #[serde(deserialize_with = "deserialize_nonoptional_map")] - pub labels: HashMap, - - /// Number of containers using this image. Includes both stopped and running containers. This size is not calculated by default, and depends on which API endpoint is used. `-1` indicates that the value has not been set / calculated. 
- #[serde(rename = "Containers")] - pub containers: I64, -} - -fn deserialize_nonoptional_vec< - 'de, - D: Deserializer<'de>, - T: DeserializeOwned, ->( - d: D, -) -> Result, D::Error> { - serde::Deserialize::deserialize(d) - .map(|x: Option<_>| x.unwrap_or_default()) -} - -fn deserialize_nonoptional_map< - 'de, - D: Deserializer<'de>, - T: DeserializeOwned, ->( - d: D, -) -> Result, D::Error> { - serde::Deserialize::deserialize(d) - .map(|x: Option<_>| x.unwrap_or_default()) -} - -#[cfg(feature = "docker")] -impl From for ImageSummary { - fn from(value: bollard::service::ImageSummary) -> Self { - Self { - id: value.id, - parent_id: value.parent_id, - repo_tags: value.repo_tags, - repo_digests: value.repo_digests, - created: value.created, - size: value.size, - shared_size: value.shared_size, - virtual_size: value.virtual_size, - labels: value.labels, - containers: value.containers, - } - } -} diff --git a/client/core/rs/src/entities/server/docker_network.rs b/client/core/rs/src/entities/server/docker_network.rs deleted file mode 100644 index f5d638921..000000000 --- a/client/core/rs/src/entities/server/docker_network.rs +++ /dev/null @@ -1,220 +0,0 @@ -use std::collections::HashMap; - -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use typeshare::typeshare; - -/// Summary of a docker network on a server. -#[typeshare] -#[derive( - Debug, Clone, Default, PartialEq, Serialize, Deserialize, -)] -pub struct DockerNetwork { - /// The name of the docker network - #[serde(rename = "Name")] - #[serde(skip_serializing_if = "Option::is_none")] - pub name: Option, - - /// The Id of the docker network - #[serde(rename = "Id")] - #[serde(skip_serializing_if = "Option::is_none")] - pub id: Option, - - /// Timestamp network created - #[serde(rename = "Created")] - #[serde(skip_serializing_if = "Option::is_none")] - #[serde( - default, - deserialize_with = "deserialize_timestamp", - serialize_with = "serialize_timestamp" - )] - pub created: Option, - - #[serde(rename = "Scope")] - #[serde(skip_serializing_if = "Option::is_none")] - pub scope: Option, - - #[serde(rename = "Driver")] - #[serde(skip_serializing_if = "Option::is_none")] - pub driver: Option, - - #[serde(rename = "EnableIPv6")] - #[serde(skip_serializing_if = "Option::is_none")] - pub enable_ipv6: Option, - - #[serde(rename = "IPAM")] - #[serde(skip_serializing_if = "Option::is_none")] - pub ipam: Option, - - #[serde(rename = "Internal")] - #[serde(skip_serializing_if = "Option::is_none")] - pub internal: Option, - - #[serde(rename = "Attachable")] - #[serde(skip_serializing_if = "Option::is_none")] - pub attachable: Option, - - #[serde(rename = "Ingress")] - #[serde(skip_serializing_if = "Option::is_none")] - pub ingress: Option, - - #[serde(rename = "Containers")] - #[serde(skip_serializing_if = "Option::is_none")] - pub containers: Option>, - - #[serde(rename = "Options")] - #[serde(skip_serializing_if = "Option::is_none")] - pub options: Option>, - - #[serde(rename = "Labels")] - #[serde(skip_serializing_if = "Option::is_none")] - pub labels: Option>, -} - -fn deserialize_timestamp<'de, D: Deserializer<'de>>( - d: D, -) -> Result, D::Error> { - serde::Deserialize::deserialize(d) -} - -fn serialize_timestamp( - date: &Option, - s: S, -) -> Result { - match date { - Some(inner) => s.serialize_some(inner), - None => s.serialize_none(), - } -} - -#[cfg(feature = "docker")] -impl From for DockerNetwork { - fn from(value: bollard::service::Network) -> Self { - Self { - name: value.name, - id: value.id, - created: value.created, 
- scope: value.scope, - driver: value.driver, - enable_ipv6: value.enable_ipv6, - ipam: value.ipam.map(|ipam| ipam.into()), - internal: value.internal, - attachable: value.attachable, - ingress: value.ingress, - containers: value.containers.map(|containers| { - containers.into_iter().map(|(k, v)| (k, v.into())).collect() - }), - options: value.options, - labels: value.labels, - } - } -} - -/// Ipam related information -#[typeshare] -#[derive( - Debug, Clone, Default, PartialEq, Serialize, Deserialize, -)] -pub struct Ipam { - /// Name of the IPAM driver to use. - #[serde(rename = "Driver")] - #[serde(skip_serializing_if = "Option::is_none")] - pub driver: Option, - - /// List of IPAM configuration options, specified as a map: ``` {\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": } ``` - #[serde(rename = "Config")] - #[serde(skip_serializing_if = "Option::is_none")] - pub config: Option>, - - /// Driver-specific options, specified as a map. - #[serde(rename = "Options")] - #[serde(skip_serializing_if = "Option::is_none")] - pub options: Option>, -} - -#[cfg(feature = "docker")] -impl From for Ipam { - fn from(value: bollard::service::Ipam) -> Self { - Self { - driver: value.driver, - config: value - .config - .map(|config| config.into_iter().map(|c| c.into()).collect()), - options: value.options, - } - } -} - -/// Ipam Configuration. -#[typeshare] -#[derive( - Debug, Clone, Default, PartialEq, Serialize, Deserialize, -)] -pub struct IpamConfig { - #[serde(rename = "Subnet")] - #[serde(skip_serializing_if = "Option::is_none")] - pub subnet: Option, - - #[serde(rename = "IPRange")] - #[serde(skip_serializing_if = "Option::is_none")] - pub ip_range: Option, - - #[serde(rename = "Gateway")] - #[serde(skip_serializing_if = "Option::is_none")] - pub gateway: Option, - - #[serde(rename = "AuxiliaryAddresses")] - #[serde(skip_serializing_if = "Option::is_none")] - pub auxiliary_addresses: Option>, -} - -#[cfg(feature = "docker")] -impl From for IpamConfig { - fn from(value: bollard::service::IpamConfig) -> Self { - Self { - subnet: value.subnet, - ip_range: value.ip_range, - gateway: value.gateway, - auxiliary_addresses: value.auxiliary_addresses, - } - } -} - -/// A container on a network. 
-#[typeshare] -#[derive( - Debug, Clone, Default, PartialEq, Serialize, Deserialize, -)] -pub struct NetworkContainer { - #[serde(rename = "Name")] - #[serde(skip_serializing_if = "Option::is_none")] - pub name: Option, - - #[serde(rename = "EndpointID")] - #[serde(skip_serializing_if = "Option::is_none")] - pub endpoint_id: Option, - - #[serde(rename = "MacAddress")] - #[serde(skip_serializing_if = "Option::is_none")] - pub mac_address: Option, - - #[serde(rename = "IPv4Address")] - #[serde(skip_serializing_if = "Option::is_none")] - pub ipv4_address: Option, - - #[serde(rename = "IPv6Address")] - #[serde(skip_serializing_if = "Option::is_none")] - pub ipv6_address: Option, -} - -#[cfg(feature = "docker")] -impl From for NetworkContainer { - fn from(value: bollard::service::NetworkContainer) -> Self { - Self { - name: value.name, - endpoint_id: value.endpoint_id, - mac_address: value.mac_address, - ipv4_address: value.ipv4_address, - ipv6_address: value.ipv6_address, - } - } -} diff --git a/client/core/rs/src/entities/server_template/hetzner.rs b/client/core/rs/src/entities/server_template/hetzner.rs index 074b603d2..c5c6ec2b8 100644 --- a/client/core/rs/src/entities/server_template/hetzner.rs +++ b/client/core/rs/src/entities/server_template/hetzner.rs @@ -214,4 +214,5 @@ pub enum HetznerDatacenter { Falkenstein1Dc14, AshburnDc1, HillsboroDc1, + SingaporeDc1, } diff --git a/client/core/rs/src/entities/stack.rs b/client/core/rs/src/entities/stack.rs index 2e5549662..ec1693c75 100644 --- a/client/core/rs/src/entities/stack.rs +++ b/client/core/rs/src/entities/stack.rs @@ -9,9 +9,9 @@ use strum::Display; use typeshare::typeshare; use super::{ - deployment::ContainerSummary, + docker::container::ContainerListItem, resource::{Resource, ResourceListItem, ResourceQuery}, - to_monitor_name, EnvironmentVar, + to_komodo_name, EnvironmentVar, }; #[typeshare] @@ -30,8 +30,8 @@ impl Stack { .config .project_name .is_empty() - .then(|| to_monitor_name(&self.name)) - .unwrap_or_else(|| to_monitor_name(&self.config.project_name)) + .then(|| to_komodo_name(&self.name)) + .unwrap_or_else(|| to_komodo_name(&self.config.project_name)) } pub fn file_paths(&self) -> &[String] { @@ -133,8 +133,8 @@ pub struct StackInfo { pub missing_files: Vec, /// The deployed project name. - /// This is updated whenever Monitor successfully deploys the stack. - /// If it is present, Monitor will use it for actions over other options, + /// This is updated whenever Komodo successfully deploys the stack. + /// If it is present, Komodo will use it for actions over other options, /// to ensure control is maintained after changing the project name (there is no rename compose project api). pub deployed_project_name: Option, @@ -142,7 +142,7 @@ pub struct StackInfo { pub deployed_hash: Option, /// Deployed commit message, or null. Only for repo based stacks pub deployed_message: Option, - /// The deployed compose file contents. This is updated whenever Monitor successfully deploys the stack. + /// The deployed compose file contents. This is updated whenever Komodo successfully deploys the stack. pub deployed_contents: Option>, /// The deployed service names. /// This is updated whenever it is empty, or deployed contents is updated. @@ -154,7 +154,7 @@ pub struct StackInfo { pub latest_services: Vec, /// The remote compose file contents, whether on host or in repo. - /// This is updated whenever Monitor refreshes the stack cache. + /// This is updated whenever Komodo refreshes the stack cache. 
/// It will be empty if the file is defined directly in the stack config. pub remote_contents: Option>, /// If there was an error in getting the remote contents, it will be here. @@ -277,7 +277,7 @@ pub struct StackConfig { /// Whether to use https to clone the repo (versus http). Default: true /// - /// Note. Monitor does not currently support cloning repos via ssh. + /// Note. Komodo does not currently support cloning repos via ssh. #[serde(default = "default_git_https")] #[builder(default = "default_git_https()")] #[partial_default(default_git_https())] @@ -422,7 +422,7 @@ pub struct StackServiceNames { /// /// 1. The name of the compose project (top level name field of compose file). /// This defaults to the name of the parent folder of the compose file. - /// Monitor will always set it to be the name of the stack, but imported stacks + /// Komodo will always set it to be the name of the stack, but imported stacks /// will have a different name. /// 2. The service name /// 3. The replica number @@ -440,7 +440,7 @@ pub struct StackService { /// The service name pub service: String, /// The container - pub container: Option, + pub container: Option, } #[typeshare] diff --git a/client/core/rs/src/entities/server/stats.rs b/client/core/rs/src/entities/stats.rs similarity index 83% rename from client/core/rs/src/entities/server/stats.rs rename to client/core/rs/src/entities/stats.rs index 429888755..1d9b7ca11 100644 --- a/client/core/rs/src/entities/server/stats.rs +++ b/client/core/rs/src/entities/stats.rs @@ -1,7 +1,6 @@ -use std::{collections::HashMap, path::PathBuf}; +use std::path::PathBuf; use serde::{Deserialize, Serialize}; -use strum::{Display, EnumString}; use typeshare::typeshare; use crate::entities::{Timelength, I64}; @@ -137,39 +136,3 @@ pub struct SystemProcess { /// Process disk write in KB/s pub disk_write_kb: f64, } - -/// Summary of the health of the server. -#[typeshare] -#[derive(Serialize, Deserialize, Default, Debug, Clone)] -pub struct ServerHealth { - pub cpu: SeverityLevel, - pub mem: SeverityLevel, - pub disks: HashMap, -} - -/// Severity level of problem. -#[typeshare] -#[derive( - Serialize, - Deserialize, - Debug, - Clone, - Copy, - PartialEq, - Eq, - PartialOrd, - Default, - Display, - EnumString, -)] -#[serde(rename_all = "UPPERCASE")] -#[strum(serialize_all = "UPPERCASE")] -pub enum SeverityLevel { - /// No problem. - #[default] - Ok, - /// Problem is imminent. - Warning, - /// Problem fully realized. - Critical, -} diff --git a/client/core/rs/src/entities/sync.rs b/client/core/rs/src/entities/sync.rs index 407113a52..0b7ed15ab 100644 --- a/client/core/rs/src/entities/sync.rs +++ b/client/core/rs/src/entities/sync.rs @@ -190,7 +190,7 @@ pub struct ResourceSyncConfig { /// Whether to use https to clone the repo (versus http). Default: true /// - /// Note. Monitor does not currently support cloning repos via ssh. + /// Note. Komodo does not currently support cloning repos via ssh. #[serde(default = "default_git_https")] #[builder(default = "default_git_https()")] #[partial_default(default_git_https())] @@ -223,7 +223,7 @@ pub struct ResourceSyncConfig { /// The path of the resource file(s) to sync, relative to the repo root. /// Can be a specific file, or a directory containing multiple files / folders. - /// See `https://docs.monitor.dev/docs/sync-resources` for more information. + /// See [https://komo.do/docs/sync-resources](https://komo.do/docs/sync-resources) for more information. 
#[serde(default = "default_resource_path")] #[builder(default = "default_resource_path()")] #[partial_default(default_resource_path())] diff --git a/client/core/rs/src/entities/toml.rs b/client/core/rs/src/entities/toml.rs index 77e790512..2b713121c 100644 --- a/client/core/rs/src/entities/toml.rs +++ b/client/core/rs/src/entities/toml.rs @@ -3,22 +3,16 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; use super::{ - alerter::PartialAlerterConfig, - build::PartialBuildConfig, - builder::PartialBuilderConfig, - deployment::PartialDeploymentConfig, - permission::PermissionLevel, - procedure::PartialProcedureConfig, - repo::PartialRepoConfig, - server::PartialServerConfig, + alerter::PartialAlerterConfig, build::PartialBuildConfig, + builder::PartialBuilderConfig, deployment::PartialDeploymentConfig, + permission::PermissionLevel, procedure::PartialProcedureConfig, + repo::PartialRepoConfig, server::PartialServerConfig, server_template::PartialServerTemplateConfig, - stack::PartialStackConfig, - sync::PartialResourceSyncConfig, - update::{ResourceTarget, ResourceTargetVariant}, - variable::Variable, + stack::PartialStackConfig, sync::PartialResourceSyncConfig, + variable::Variable, ResourceTarget, ResourceTargetVariant, }; -/// Specifies resources to sync on monitor +/// Specifies resources to sync on Komodo #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct ResourcesToml { #[serde( diff --git a/client/core/rs/src/entities/update.rs b/client/core/rs/src/entities/update.rs index e6ac61dea..896c51da4 100644 --- a/client/core/rs/src/entities/update.rs +++ b/client/core/rs/src/entities/update.rs @@ -1,21 +1,15 @@ use async_timing_util::unix_timestamp_ms; -use derive_variants::{EnumVariants, ExtractVariant}; use serde::{Deserialize, Serialize}; -use strum::{AsRefStr, Display, EnumString}; +use strum::{Display, EnumString}; use typeshare::typeshare; use crate::entities::{ - all_logs_success, monitor_timestamp, MongoId, Operation, I64, + all_logs_success, komodo_timestamp, MongoId, Operation, I64, }; -use super::{ - alerter::Alerter, build::Build, builder::Builder, - deployment::Deployment, procedure::Procedure, repo::Repo, - server::Server, server_template::ServerTemplate, stack::Stack, - sync::ResourceSync, Version, -}; +use super::{ResourceTarget, Version}; -/// Represents an action performed by Monitor. +/// Represents an action performed by Komodo. #[typeshare] #[derive(Serialize, Deserialize, Debug, Clone, Default)] #[cfg_attr( @@ -103,12 +97,12 @@ impl Update { pub fn finalize(&mut self) { self.success = all_logs_success(&self.logs); - self.end_ts = Some(monitor_timestamp()); + self.end_ts = Some(komodo_timestamp()); self.status = UpdateStatus::Complete; } } -/// Minimal representation of an action performed by Monitor. +/// Minimal representation of an action performed by Komodo. 
#[typeshare] #[derive(Serialize, Deserialize, Debug, Clone)] pub struct UpdateListItem { @@ -202,139 +196,6 @@ impl Log { } } -/// Used to reference a specific resource across all resource types -#[typeshare] -#[derive( - Debug, - Clone, - PartialEq, - Eq, - Hash, - Serialize, - Deserialize, - EnumVariants, -)] -#[variant_derive( - Debug, - Clone, - Copy, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - Display, - EnumString, - AsRefStr -)] -#[serde(tag = "type", content = "id")] -pub enum ResourceTarget { - System(String), - Build(String), - Builder(String), - Deployment(String), - Server(String), - Repo(String), - Alerter(String), - Procedure(String), - ServerTemplate(String), - ResourceSync(String), - Stack(String), -} - -impl ResourceTarget { - pub fn extract_variant_id( - &self, - ) -> (ResourceTargetVariant, &String) { - let id = match &self { - ResourceTarget::System(id) => id, - ResourceTarget::Build(id) => id, - ResourceTarget::Builder(id) => id, - ResourceTarget::Deployment(id) => id, - ResourceTarget::Server(id) => id, - ResourceTarget::Repo(id) => id, - ResourceTarget::Alerter(id) => id, - ResourceTarget::Procedure(id) => id, - ResourceTarget::ServerTemplate(id) => id, - ResourceTarget::ResourceSync(id) => id, - ResourceTarget::Stack(id) => id, - }; - (self.extract_variant(), id) - } - - pub fn system() -> ResourceTarget { - Self::System("system".to_string()) - } -} - -impl Default for ResourceTarget { - fn default() -> Self { - ResourceTarget::system() - } -} - -impl From<&Build> for ResourceTarget { - fn from(build: &Build) -> Self { - Self::Build(build.id.clone()) - } -} - -impl From<&Deployment> for ResourceTarget { - fn from(deployment: &Deployment) -> Self { - Self::Deployment(deployment.id.clone()) - } -} - -impl From<&Server> for ResourceTarget { - fn from(server: &Server) -> Self { - Self::Server(server.id.clone()) - } -} - -impl From<&Repo> for ResourceTarget { - fn from(repo: &Repo) -> Self { - Self::Repo(repo.id.clone()) - } -} - -impl From<&Builder> for ResourceTarget { - fn from(builder: &Builder) -> Self { - Self::Builder(builder.id.clone()) - } -} - -impl From<&Alerter> for ResourceTarget { - fn from(alerter: &Alerter) -> Self { - Self::Alerter(alerter.id.clone()) - } -} - -impl From<&Procedure> for ResourceTarget { - fn from(procedure: &Procedure) -> Self { - Self::Procedure(procedure.id.clone()) - } -} - -impl From<&ServerTemplate> for ResourceTarget { - fn from(server_template: &ServerTemplate) -> Self { - Self::ServerTemplate(server_template.id.clone()) - } -} - -impl From<&ResourceSync> for ResourceTarget { - fn from(resource_sync: &ResourceSync) -> Self { - Self::ResourceSync(resource_sync.id.clone()) - } -} - -impl From<&Stack> for ResourceTarget { - fn from(resource_sync: &Stack) -> Self { - Self::Stack(resource_sync.id.clone()) - } -} - /// An update's status #[typeshare] #[derive( diff --git a/client/core/rs/src/entities/user.rs b/client/core/rs/src/entities/user.rs index 73bcb69e5..fab9d9801 100644 --- a/client/core/rs/src/entities/user.rs +++ b/client/core/rs/src/entities/user.rs @@ -6,7 +6,7 @@ use typeshare::typeshare; use crate::entities::{MongoId, I64}; use super::{ - permission::PermissionLevel, update::ResourceTargetVariant, + permission::PermissionLevel, ResourceTargetVariant, }; #[typeshare] @@ -79,7 +79,7 @@ impl User { } /// Returns whether user is an inbuilt service user - /// + /// /// NOTE: ALSO UPDATE `frontend/src/lib/utils/is_service_user` to match pub fn is_service_user(user_id: &str) -> bool { matches!( 
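For readers following the `ResourceTarget` move out of `update.rs` (the enum now lives at the entities root, earlier in this patch), note that it is adjacently tagged via `#[serde(tag = "type", content = "id")]`. A small sketch of what that representation looks like on the wire, using a trimmed stand-in enum and a made-up id rather than the crate's real type:

```rust
// Stand-in with only a few variants, mirroring the adjacently tagged layout
// of ResourceTarget. Assumes serde (with "derive") and serde_json.
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type", content = "id")]
enum TargetSketch {
  System(String),
  Server(String),
  Deployment(String),
}

fn main() -> serde_json::Result<()> {
  // Hypothetical id, purely for illustration.
  let target = TargetSketch::Server("6633e4f4dd35b0001234abcd".to_string());
  // Prints {"type":"Server","id":"6633e4f4dd35b0001234abcd"}
  println!("{}", serde_json::to_string(&target)?);

  // Round-trips from the same shape, e.g. the default "system" target.
  let parsed: TargetSketch =
    serde_json::from_str(r#"{ "type": "System", "id": "system" }"#)?;
  println!("{parsed:?}");
  Ok(())
}
```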
diff --git a/client/core/rs/src/entities/user_group.rs b/client/core/rs/src/entities/user_group.rs
index d152c5047..8f6c6832e 100644
--- a/client/core/rs/src/entities/user_group.rs
+++ b/client/core/rs/src/entities/user_group.rs
@@ -4,8 +4,7 @@ use serde::{Deserialize, Serialize};
 use typeshare::typeshare;
 
 use super::{
-  permission::PermissionLevel, update::ResourceTargetVariant,
-  MongoId, I64,
+  permission::PermissionLevel, MongoId, ResourceTargetVariant, I64,
 };
 
 /// Permission users at the group level.
diff --git a/client/core/rs/src/entities/variable.rs b/client/core/rs/src/entities/variable.rs
index 406aca96d..2a377db30 100644
--- a/client/core/rs/src/entities/variable.rs
+++ b/client/core/rs/src/entities/variable.rs
@@ -20,4 +20,12 @@ pub struct Variable {
   /// The value associated with the variable.
   #[serde(default)]
   pub value: String,
+  /// If marked as secret, the variable value will be hidden in updates / logs.
+  /// Additionally the value will not be served in read requests by non admin users.
+  ///
+  /// Note that the value is NOT encrypted in the database, and will likely show up in database logs.
+  /// The security of these variables comes down to the security
+  /// of the database (system level encryption, network isolation, etc.)
+  #[serde(default)]
+  pub is_secret: bool,
 }
diff --git a/client/core/rs/src/lib.rs b/client/core/rs/src/lib.rs
index 3b0f7a546..da8adbe10 100644
--- a/client/core/rs/src/lib.rs
+++ b/client/core/rs/src/lib.rs
@@ -1,31 +1,31 @@
-//! # Monitor
+//! # Komodo
 //! *A system to build and deploy software accross many servers*
 //!
-//! This is a client library for the monitor core API.
+//! This is a client library for the Komodo Core API.
 //! It contains:
 //! - Definitions for the application [api] and [entities].
-//! - A [client][MonitorClient] to interact with the monitor core API.
-//! - Information on configuring monitor [core][entities::config::core] and [periphery][entities::config::periphery].
+//! - A [client][KomodoClient] to interact with the Komodo Core API.
+//! - Information on configuring Komodo [core][entities::config::core] and [periphery][entities::config::periphery].
 //!
 //! ## Client Configuration
 //!
-//! The client includes a convenenience method to parse the monitor url and credentials from the environment:
-//! - MONITOR_ADDRESS
-//! - MONITOR_API_KEY
-//! - MONITOR_API_SECRET
+//! The client includes a convenience method to parse the Komodo API url and credentials from the environment:
+//! - KOMODO_ADDRESS
+//! - KOMODO_API_KEY
+//! - KOMODO_API_SECRET
 //!
 //! ## Client Example
 //! ```
 //! dotenvy::dotenv().ok();
 //!
-//! let client = MonitorClient::new_from_env()?;
+//! let client = KomodoClient::new_from_env()?;
 //!
 //! // Get all the deployments
 //! let deployments = client.read(ListDeployments::default()).await?;
 //!
 //! println!("{deployments:#?}");
 //!
-//! let update = client.execute
+//! let update = client.execute(RunBuild { build: "test-build".to_string() }).await?;
 //!
``` use anyhow::Context; @@ -40,28 +40,28 @@ pub mod ws; mod request; #[derive(Deserialize)] -struct MonitorEnv { - monitor_address: String, - monitor_api_key: String, - monitor_api_secret: String, +struct KomodoEnv { + komodo_address: String, + komodo_api_key: String, + komodo_api_secret: String, } #[derive(Clone)] -pub struct MonitorClient { +pub struct KomodoClient { reqwest: reqwest::Client, address: String, key: String, secret: String, } -impl MonitorClient { +impl KomodoClient { #[tracing::instrument(skip_all)] pub async fn new( address: impl Into, key: impl Into, secret: impl Into, - ) -> anyhow::Result { - let client = MonitorClient { + ) -> anyhow::Result { + let client = KomodoClient { reqwest: Default::default(), address: address.into(), key: key.into(), @@ -72,17 +72,17 @@ impl MonitorClient { } #[tracing::instrument] - pub async fn new_from_env() -> anyhow::Result { - let MonitorEnv { - monitor_address, - monitor_api_key, - monitor_api_secret, + pub async fn new_from_env() -> anyhow::Result { + let KomodoEnv { + komodo_address, + komodo_api_key, + komodo_api_secret, } = envy::from_env() - .context("failed to parse environment for monitor client")?; - MonitorClient::new( - monitor_address, - monitor_api_key, - monitor_api_secret, + .context("failed to parse environment for komodo client")?; + KomodoClient::new( + komodo_address, + komodo_api_key, + komodo_api_secret, ) .await } diff --git a/client/core/rs/src/request.rs b/client/core/rs/src/request.rs index 1381c012b..3a9f0cdcb 100644 --- a/client/core/rs/src/request.rs +++ b/client/core/rs/src/request.rs @@ -6,16 +6,16 @@ use serror::deserialize_error; use crate::{ api::{ - auth::MonitorAuthRequest, execute::MonitorExecuteRequest, - read::MonitorReadRequest, user::MonitorUserRequest, - write::MonitorWriteRequest, + auth::KomodoAuthRequest, execute::KomodoExecuteRequest, + read::KomodoReadRequest, user::KomodoUserRequest, + write::KomodoWriteRequest, }, - MonitorClient, + KomodoClient, }; -impl MonitorClient { +impl KomodoClient { #[tracing::instrument(skip(self))] - pub async fn auth( + pub async fn auth( &self, request: T, ) -> anyhow::Result { @@ -31,7 +31,7 @@ impl MonitorClient { } #[tracing::instrument(skip(self))] - pub async fn user( + pub async fn user( &self, request: T, ) -> anyhow::Result { @@ -47,7 +47,7 @@ impl MonitorClient { } #[tracing::instrument(skip(self))] - pub async fn read( + pub async fn read( &self, request: T, ) -> anyhow::Result { @@ -63,7 +63,7 @@ impl MonitorClient { } #[tracing::instrument(skip(self))] - pub async fn write( + pub async fn write( &self, request: T, ) -> anyhow::Result { @@ -79,7 +79,7 @@ impl MonitorClient { } #[tracing::instrument(skip(self))] - pub async fn execute( + pub async fn execute( &self, request: T, ) -> anyhow::Result { @@ -111,7 +111,7 @@ impl MonitorClient { .header("Content-Type", "application/json") .json(&body); let res = - req.send().await.context("failed to reach monitor api")?; + req.send().await.context("failed to reach Komodo API")?; tracing::debug!("got response"); let status = res.status(); if status == StatusCode::OK { diff --git a/client/core/rs/src/ws.rs b/client/core/rs/src/ws.rs index c9a151947..05de5d791 100644 --- a/client/core/rs/src/ws.rs +++ b/client/core/rs/src/ws.rs @@ -12,7 +12,7 @@ use tracing::{info, info_span, warn, Instrument}; use typeshare::typeshare; use uuid::Uuid; -use crate::{entities::update::UpdateListItem, MonitorClient}; +use crate::{entities::update::UpdateListItem, KomodoClient}; #[typeshare] #[derive(Debug, Clone, Serialize, 
Deserialize)] @@ -56,7 +56,7 @@ pub enum UpdateWsError { const MAX_SHORT_RETRY_COUNT: usize = 5; -impl MonitorClient { +impl KomodoClient { pub fn subscribe_to_updates( &self, capacity: usize, @@ -117,7 +117,7 @@ impl MonitorClient { let mut ws = match connect_async(&address).await.with_context(|| { format!( - "failed to connect to monitor websocket at {address}" + "failed to connect to Komodo update websocket at {address}" ) }) { Ok((ws, _)) => ws, diff --git a/client/core/ts/package.json b/client/core/ts/package.json index 40f20ea6c..ca6943197 100644 --- a/client/core/ts/package.json +++ b/client/core/ts/package.json @@ -1,5 +1,5 @@ { - "name": "@monitor/client", + "name": "@komodo/client", "version": "1.0.0", "main": "dist/lib.js", "license": "MIT", diff --git a/client/core/ts/src/lib.ts b/client/core/ts/src/lib.ts index 9bc1b11cf..4c316ad36 100644 --- a/client/core/ts/src/lib.ts +++ b/client/core/ts/src/lib.ts @@ -20,7 +20,7 @@ type InitOptions = | { type: "jwt"; params: { jwt: string } } | { type: "api-key"; params: { api_key: string; secret: string } }; -export function MonitorClient(url: string, options: InitOptions) { +export function KomodoClient(url: string, options: InitOptions) { const state = { jwt: options.type === "jwt" ? options.params.jwt : undefined, api_key: options.type === "api-key" ? options.params.api_key : undefined, diff --git a/client/core/ts/src/responses.ts b/client/core/ts/src/responses.ts index 5aee72a49..18a6f20d9 100644 --- a/client/core/ts/src/responses.ts +++ b/client/core/ts/src/responses.ts @@ -59,8 +59,17 @@ export type ReadResponses = { GetServerState: Types.GetServerStateResponse; GetPeripheryVersion: Types.GetPeripheryVersionResponse; ListDockerContainers: Types.ListDockerContainersResponse; - ListDockerImages: Types.ListDockerImagesResponse; + InspectDockerContainer: Types.InspectDockerContainerResponse; + GetResourceMatchingContainer: Types.GetResourceMatchingContainerResponse; + GetContainerLog: Types.GetContainerLogResponse; + SearchContainerLog: Types.SearchContainerLogResponse; ListDockerNetworks: Types.ListDockerNetworksResponse; + InspectDockerNetwork: Types.InspectDockerNetworkResponse; + ListDockerImages: Types.ListDockerImagesResponse; + InspectDockerImage: Types.InspectDockerImageResponse; + ListDockerImageHistory: Types.ListDockerImageHistoryResponse; + ListDockerVolumes: Types.ListDockerVolumesResponse; + InspectDockerVolume: Types.InspectDockerVolumeResponse; ListComposeProjects: Types.ListComposeProjectsResponse; GetServerActionState: Types.GetServerActionStateResponse; GetHistoricalServerStats: Types.GetHistoricalServerStatsResponse; @@ -73,8 +82,8 @@ export type ReadResponses = { GetDeploymentContainer: Types.GetDeploymentContainerResponse; GetDeploymentActionState: Types.GetDeploymentActionStateResponse; GetDeploymentStats: Types.GetDeploymentStatsResponse; - GetLog: Types.GetLogResponse; - SearchLog: Types.SearchLogResponse; + GetDeploymentLog: Types.GetDeploymentLogResponse; + SearchDeploymentLog: Types.SearchDeploymentLogResponse; ListDeployments: Types.ListDeploymentsResponse; ListFullDeployments: Types.ListFullDeploymentsResponse; ListCommonDeploymentExtraArgs: Types.ListCommonDeploymentExtraArgsResponse; @@ -192,7 +201,6 @@ export type WriteResponses = { UpdateServer: Types.Server; RenameServer: Types.Update; CreateNetwork: Types.Update; - DeleteNetwork: Types.Update; // ==== DEPLOYMENT ==== CreateDeployment: Types.Deployment; @@ -272,6 +280,7 @@ export type WriteResponses = { CreateVariable: Types.CreateVariableResponse; 
UpdateVariableValue: Types.UpdateVariableValueResponse; UpdateVariableDescription: Types.UpdateVariableDescriptionResponse; + UpdateVariableIsSecret: Types.UpdateVariableIsSecretResponse; DeleteVariable: Types.DeleteVariableResponse; // ==== PROVIDERS ==== @@ -285,19 +294,34 @@ export type WriteResponses = { export type ExecuteResponses = { // ==== SERVER ==== - StopAllContainers: Types.Update; - PruneContainers: Types.Update; - PruneImages: Types.Update; - PruneNetworks: Types.Update; - - // ==== DEPLOYMENT ==== - Deploy: Types.Update; StartContainer: Types.Update; RestartContainer: Types.Update; PauseContainer: Types.Update; UnpauseContainer: Types.Update; StopContainer: Types.Update; - RemoveContainer: Types.Update; + DestroyContainer: Types.Update; + StartAllContainers: Types.Update; + RestartAllContainers: Types.Update; + PauseAllContainers: Types.Update; + UnpauseAllContainers: Types.Update; + StopAllContainers: Types.Update; + PruneContainers: Types.Update; + DeleteNetwork: Types.Update; + PruneNetworks: Types.Update; + DeleteImage: Types.Update; + PruneImages: Types.Update; + DeleteVolume: Types.Update; + PruneVolumes: Types.Update; + PruneSystem: Types.Update; + + // ==== DEPLOYMENT ==== + Deploy: Types.Update; + StartDeployment: Types.Update; + RestartDeployment: Types.Update; + PauseDeployment: Types.Update; + UnpauseDeployment: Types.Update; + StopDeployment: Types.Update; + DestroyDeployment: Types.Update; // ==== BUILD ==== RunBuild: Types.Update; diff --git a/client/core/ts/src/types.ts b/client/core/ts/src/types.ts index b7b92d60d..3bf4550da 100644 --- a/client/core/ts/src/types.ts +++ b/client/core/ts/src/types.ts @@ -390,7 +390,7 @@ export interface BuildConfig { image_name?: string; /** * An extra tag put before the build version, for the image pushed to the repository. - * Eg. in image tag of `aarch64` would push to mbecker20/monitor_core:aarch64-1.13.2. + * Eg. in image tag of `aarch64` would push to mbecker20/komodo:1.13.2-aarch64. * If this is empty, the image tag will just be the build version. * * Can be used in conjunction with `image_name` to direct multiple builds @@ -403,7 +403,7 @@ export interface BuildConfig { /** * Whether to use https to clone the repo (versus http). Default: true * - * Note. Monitor does not currently support cloning repos via ssh. + * Note. Komodo does not currently support cloning repos via ssh. */ git_https: boolean; /** @@ -568,9 +568,9 @@ export type DeploymentImage = /** The docker image, can be from any registry that works with docker and that the host server can reach. */ image?: string; }} - /** Deploy a monitor build. */ + /** Deploy a Komodo Build. */ | { type: "Build", params: { - /** The id of the build */ + /** The id of the Build */ build_id?: string; /** * Use a custom / older version of the image produced by the build. @@ -610,7 +610,7 @@ export interface DeploymentConfig { server_id?: string; /** * The image which the deployment deploys. - * Can either be a user inputted image, or a Monitor build. + * Can either be a user inputted image, or a Komodo Build. */ image?: DeploymentImage; /** @@ -706,7 +706,7 @@ export interface DeploymentListItemInfo { image: string; /** The server that deployment sits on. */ server_id: string; - /** An attached monitor build, if it exists. */ + /** An attached Komodo Build, if it exists. 
*/ build_id?: string; } @@ -734,11 +734,11 @@ export interface Log { end_ts: I64; } -export type GetLogResponse = Log; +export type GetDeploymentLogResponse = Log; -export type SearchLogResponse = Log; +export type SearchDeploymentLogResponse = Log; -export interface DockerContainerStats { +export interface ContainerStats { name: string; cpu_perc: string; mem_perc: string; @@ -748,7 +748,7 @@ export interface DockerContainerStats { pids: string; } -export type GetDeploymentStatsResponse = DockerContainerStats; +export type GetDeploymentStatsResponse = ContainerStats; export interface DeploymentActionState { deploying: boolean; @@ -757,7 +757,7 @@ export interface DeploymentActionState { pausing: boolean; unpausing: boolean; stopping: boolean; - removing: boolean; + destroying: boolean; renaming: boolean; } @@ -825,7 +825,7 @@ export type GetPermissionLevelResponse = PermissionLevel; export type ListUserTargetPermissionsResponse = Permission[]; -/** A wrapper for all monitor exections. */ +/** A wrapper for all Komodo exections. */ export type Execution = /** The "null" execution. Does nothing. */ | { type: "None", params: NoData } @@ -833,20 +833,35 @@ export type Execution = | { type: "RunBuild", params: RunBuild } | { type: "CancelBuild", params: CancelBuild } | { type: "Deploy", params: Deploy } + | { type: "StartDeployment", params: StartDeployment } + | { type: "RestartDeployment", params: RestartDeployment } + | { type: "PauseDeployment", params: PauseDeployment } + | { type: "UnpauseDeployment", params: UnpauseDeployment } + | { type: "StopDeployment", params: StopDeployment } + | { type: "DestroyDeployment", params: DestroyDeployment } + | { type: "CloneRepo", params: CloneRepo } + | { type: "PullRepo", params: PullRepo } + | { type: "BuildRepo", params: BuildRepo } + | { type: "CancelRepoBuild", params: CancelRepoBuild } | { type: "StartContainer", params: StartContainer } | { type: "RestartContainer", params: RestartContainer } | { type: "PauseContainer", params: PauseContainer } | { type: "UnpauseContainer", params: UnpauseContainer } | { type: "StopContainer", params: StopContainer } - | { type: "RemoveContainer", params: RemoveContainer } - | { type: "CloneRepo", params: CloneRepo } - | { type: "PullRepo", params: PullRepo } - | { type: "BuildRepo", params: BuildRepo } - | { type: "CancelRepoBuild", params: CancelRepoBuild } + | { type: "DestroyContainer", params: DestroyContainer } + | { type: "StartAllContainers", params: StartAllContainers } + | { type: "RestartAllContainers", params: RestartAllContainers } + | { type: "PauseAllContainers", params: PauseAllContainers } + | { type: "UnpauseAllContainers", params: UnpauseAllContainers } | { type: "StopAllContainers", params: StopAllContainers } - | { type: "PruneNetworks", params: PruneNetworks } - | { type: "PruneImages", params: PruneImages } | { type: "PruneContainers", params: PruneContainers } + | { type: "DeleteNetwork", params: DeleteNetwork } + | { type: "PruneNetworks", params: PruneNetworks } + | { type: "DeleteImage", params: DeleteImage } + | { type: "PruneImages", params: PruneImages } + | { type: "DeleteVolume", params: DeleteVolume } + | { type: "PruneVolumes", params: PruneVolumes } + | { type: "PruneSystem", params: PruneSystem } | { type: "RunSync", params: RunSync } | { type: "DeployStack", params: DeployStack } | { type: "StartStack", params: StartStack } @@ -998,7 +1013,7 @@ export interface RepoConfig { /** * Whether to use https to clone the repo (versus http). Default: true * - * Note. 
Monitor does not currently support cloning repos via ssh. + * Note. Komodo does not currently support cloning repos via ssh. */ git_https: boolean; /** @@ -1225,32 +1240,62 @@ export interface ServerActionState { pruning_containers: boolean; /** Server currently pruning images */ pruning_images: boolean; - /** Server currently stopping all containers. */ + /** Server currently pruning images */ + pruning_volumes: boolean; + /** Server currently pruning system */ + pruning_system: boolean; + /** Server currently starting containers. */ + starting_containers: boolean; + /** Server currently restarting containers. */ + restarting_containers: boolean; + /** Server currently pausing containers. */ + pausing_containers: boolean; + /** Server currently unpausing containers. */ + unpausing_containers: boolean; + /** Server currently stopping containers. */ stopping_containers: boolean; } export type GetServerActionStateResponse = ServerActionState; -/** Ipam Configuration. */ +export interface NetworkListItem { + name?: string; + id?: string; + created?: string; + scope?: string; + driver?: string; + enable_ipv6?: boolean; + ipam_driver?: string; + ipam_subnet?: string; + ipam_gateway?: string; + internal?: boolean; + attachable?: boolean; + ingress?: boolean; + /** Whether the network is attached to one or more containers */ + in_use: boolean; +} + +export type ListDockerNetworksResponse = NetworkListItem[]; + export interface IpamConfig { Subnet?: string; IPRange?: string; Gateway?: string; - AuxiliaryAddresses?: Record; + AuxiliaryAddresses: Record; } -/** Ipam related information */ export interface Ipam { /** Name of the IPAM driver to use. */ Driver?: string; /** List of IPAM configuration options, specified as a map: ``` {\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": } ``` */ - Config?: IpamConfig[]; + Config: IpamConfig[]; /** Driver-specific options, specified as a map. */ - Options?: Record; + Options: Record; } -/** A container on a network. */ export interface NetworkContainer { + /** This is the key on the incoming map of NetworkContainer */ + ContainerID?: string; Name?: string; EndpointID?: string; MacAddress?: string; @@ -1258,13 +1303,9 @@ export interface NetworkContainer { IPv6Address?: string; } -/** Summary of a docker network on a server. */ -export interface DockerNetwork { - /** The name of the docker network */ +export interface Network { Name?: string; - /** The Id of the docker network */ Id?: string; - /** Timestamp network created */ Created?: string; Scope?: string; Driver?: string; @@ -1273,60 +1314,844 @@ export interface DockerNetwork { Internal?: boolean; Attachable?: boolean; Ingress?: boolean; - Containers?: Record; + /** This field is turned from map into array for easier usability. */ + Containers: NetworkContainer[]; Options?: Record; Labels?: Record; } -export type ListDockerNetworksResponse = DockerNetwork[]; +export type InspectDockerNetworkResponse = Network; -/** Summary of a docker image cached on a server */ -export interface ImageSummary { - /** ID is the content-addressable ID of an image. This identifier is a content-addressable digest calculated from the image's configuration (which includes the digests of layers used by the image). Note that this digest differs from the `RepoDigests` below, which holds digests of image manifests that reference the image. */ - Id: string; - /** ID of the parent image. Depending on how the image was created, this field may be empty and is only set for images that were built/created locally. 
This field is empty if the image was pulled from an image registry. */ - ParentId: string; - /** List of image names/tags in the local image cache that reference this image. Multiple image tags can refer to the same image, and this list may be empty if no tags reference the image, in which case the image is \"untagged\", in which case it can still be referenced by its ID. */ - RepoTags: string[]; - /** List of content-addressable digests of locally available image manifests that the image is referenced from. Multiple manifests can refer to the same image. These digests are usually only available if the image was either pulled from a registry, or if the image was pushed to a registry, which is when the manifest is generated and its digest calculated. */ - RepoDigests: string[]; - /** Date and time at which the image was created as a Unix timestamp (number of seconds sinds EPOCH). */ - Created: I64; - /** Total size of the image including all layers it is composed of. */ - Size: I64; - /** Total size of image layers that are shared between this image and other images. This size is not calculated by default. `-1` indicates that the value has not been set / calculated. */ - SharedSize: I64; - /** Total size of the image including all layers it is composed of. In versions of Docker before v1.10, this field was calculated from the image itself and all of its parent images. Docker v1.10 and up store images self-contained, and no longer use a parent-chain, making this field an equivalent of the Size field. This field is kept for backward compatibility, but may be removed in a future version of the API. */ - VirtualSize?: I64; - /** User-defined key/value metadata. */ - Labels: Record; - /** Number of containers using this image. Includes both stopped and running containers. This size is not calculated by default, and depends on which API endpoint is used. `-1` indicates that the value has not been set / calculated. */ - Containers: I64; -} - -export type ListDockerImagesResponse = ImageSummary[]; - -/** A summary of a docker container on a server. */ -export interface ContainerSummary { - /** Name of the container. */ +export interface ImageListItem { + /** The first tag in `repo_tags`, or Id if no tags. */ name: string; - /** Id of the container. */ + /** ID is the content-addressable ID of an image. This identifier is a content-addressable digest calculated from the image's configuration (which includes the digests of layers used by the image). Note that this digest differs from the `RepoDigests` below, which holds digests of image manifests that reference the image. */ id: string; - /** The image the container is based on. */ - image: string; - /** The docker labels on the container. */ - labels: Record; - /** The state of the container, like `running` or `not_deployed` */ - state: DeploymentState; - /** The status string of the docker container. */ - status?: string; - /** The network mode of the container. */ - network_mode?: string; - /** Network names attached to the container */ - networks?: string[]; + /** ID of the parent image. Depending on how the image was created, this field may be empty and is only set for images that were built/created locally. This field is empty if the image was pulled from an image registry. */ + parent_id: string; + /** Date and time at which the image was created as a Unix timestamp (number of seconds sinds EPOCH). */ + created: I64; + /** Total size of the image including all layers it is composed of. 
*/ + size: I64; + /** Whether the image is in use by any container */ + in_use: boolean; } -export type ListDockerContainersResponse = ContainerSummary[]; +export type ListDockerImagesResponse = ImageListItem[]; + +/** A test to perform to check that the container is healthy. */ +export interface HealthConfig { + /** The test to perform. Possible values are: - `[]` inherit healthcheck from image or parent image - `[\"NONE\"]` disable healthcheck - `[\"CMD\", args...]` exec arguments directly - `[\"CMD-SHELL\", command]` run command with system's default shell */ + Test?: string[]; + /** The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. */ + Interval?: I64; + /** The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. */ + Timeout?: I64; + /** The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit. */ + Retries?: I64; + /** Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. */ + StartPeriod?: I64; + /** The time to wait between checks in nanoseconds during the start period. It should be 0 or at least 1000000 (1 ms). 0 means inherit. */ + StartInterval?: I64; +} + +/** Configuration for a container that is portable between hosts. When used as `ContainerConfig` field in an image, `ContainerConfig` is an optional field containing the configuration of the container that was last committed when creating the image. Previous versions of Docker builder used this field to store build cache, and it is not in active use anymore. */ +export interface ContainerConfig { + /** The hostname to use for the container, as a valid RFC 1123 hostname. */ + Hostname?: string; + /** The domain name to use for the container. */ + Domainname?: string; + /** The user that commands are run as inside the container. */ + User?: string; + /** Whether to attach to `stdin`. */ + AttachStdin?: boolean; + /** Whether to attach to `stdout`. */ + AttachStdout?: boolean; + /** Whether to attach to `stderr`. */ + AttachStderr?: boolean; + /** An object mapping ports to an empty object in the form: `{\"/\": {}}` */ + ExposedPorts?: Record>; + /** Attach standard streams to a TTY, including `stdin` if it is not closed. */ + Tty?: boolean; + /** Open `stdin` */ + OpenStdin?: boolean; + /** Close `stdin` after one attached client disconnects */ + StdinOnce?: boolean; + /** A list of environment variables to set inside the container in the form `[\"VAR=value\", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. */ + Env?: string[]; + /** Command to run specified as a string or an array of strings. */ + Cmd?: string[]; + Healthcheck?: HealthConfig; + /** Command is already escaped (Windows only) */ + ArgsEscaped?: boolean; + /** The name (or reference) of the image to use when creating the container, or which was used when the container was created. */ + Image?: string; + /** An object mapping mount point paths inside the container to empty objects. */ + Volumes?: Record>; + /** The working directory for commands to run in. */ + WorkingDir?: string; + /** The entry point for the container as a string or an array of strings. 
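// Usage sketch (illustrative only): a HealthConfig literal using the
// `["CMD-SHELL", command]` test form and the nanosecond-based timing fields
// documented above (assuming I64 is a plain number alias, like U64 below).
// The command and timings are arbitrary examples.
const exampleHealthcheck: HealthConfig = {
  Test: ["CMD-SHELL", "curl -f http://localhost:8080/health || exit 1"],
  Interval: 30_000_000_000,    // 30s between checks
  Timeout: 5_000_000_000,      // 5s before a check is considered hung
  Retries: 3,                  // unhealthy after 3 consecutive failures
  StartPeriod: 10_000_000_000, // 10s grace period before retries count
};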
If the array consists of exactly one empty string (`[\"\"]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). */ + Entrypoint?: string[]; + /** Disable networking for the container. */ + NetworkDisabled?: boolean; + /** MAC address of the container. Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. */ + MacAddress?: string; + /** `ONBUILD` metadata that were defined in the image's `Dockerfile`. */ + OnBuild?: string[]; + /** User-defined key/value metadata. */ + Labels?: Record; + /** Signal to stop a container as a string or unsigned integer. */ + StopSignal?: string; + /** Timeout to stop a container in seconds. */ + StopTimeout?: I64; + /** Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. */ + Shell?: string[]; +} + +/** Information about the storage driver used to store the container's and image's filesystem. */ +export interface GraphDriverData { + /** Name of the storage driver. */ + Name?: string; + /** Low-level storage metadata, provided as key/value pairs. This information is driver-specific, and depends on the storage-driver in use, and should be used for informational purposes only. */ + Data?: Record; +} + +/** Information about the image's RootFS, including the layer IDs. */ +export interface ImageInspectRootFs { + Type?: string; + Layers?: string[]; +} + +/** Additional metadata of the image in the local cache. This information is local to the daemon, and not part of the image itself. */ +export interface ImageInspectMetadata { + /** Date and time at which the image was last tagged in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. This information is only available if the image was tagged locally, and omitted otherwise. */ + LastTagTime?: string; +} + +/** Information about an image in the local image cache. */ +export interface Image { + /** ID is the content-addressable ID of an image. This identifier is a content-addressable digest calculated from the image's configuration (which includes the digests of layers used by the image). Note that this digest differs from the `RepoDigests` below, which holds digests of image manifests that reference the image. */ + Id?: string; + /** List of image names/tags in the local image cache that reference this image. Multiple image tags can refer to the same image, and this list may be empty if no tags reference the image, in which case the image is \"untagged\", in which case it can still be referenced by its ID. */ + RepoTags?: string[]; + /** List of content-addressable digests of locally available image manifests that the image is referenced from. Multiple manifests can refer to the same image. These digests are usually only available if the image was either pulled from a registry, or if the image was pushed to a registry, which is when the manifest is generated and its digest calculated. */ + RepoDigests?: string[]; + /** ID of the parent image. Depending on how the image was created, this field may be empty and is only set for images that were built/created locally. This field is empty if the image was pulled from an image registry. */ + Parent?: string; + /** Optional message that was set when committing or importing the image. */ + Comment?: string; + /** Date and time at which the image was created, formatted in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
This information is only available if present in the image, and omitted otherwise. */ + Created?: string; + /** The version of Docker that was used to build the image. Depending on how the image was created, this field may be empty. */ + DockerVersion?: string; + /** Name of the author that was specified when committing the image, or as specified through MAINTAINER (deprecated) in the Dockerfile. */ + Author?: string; + /** Configuration for a container that is portable between hosts. */ + Config?: ContainerConfig; + /** Hardware CPU architecture that the image runs on. */ + Architecture?: string; + /** CPU architecture variant (presently ARM-only). */ + Variant?: string; + /** Operating System the image is built to run on. */ + Os?: string; + /** Operating System version the image is built to run on (especially for Windows). */ + OsVersion?: string; + /** Total size of the image including all layers it is composed of. */ + Size?: I64; + GraphDriver?: GraphDriverData; + RootFS?: ImageInspectRootFs; + Metadata?: ImageInspectMetadata; +} + +export type InspectDockerImageResponse = Image; + +/** individual image layer information in response to ImageHistory operation */ +export interface ImageHistoryResponseItem { + Id: string; + Created: I64; + CreatedBy: string; + Tags?: string[]; + Size: I64; + Comment: string; +} + +export type ListDockerImageHistoryResponse = ImageHistoryResponseItem[]; + +export enum ContainerStateStatusEnum { + Empty = "", + Created = "created", + Running = "running", + Paused = "paused", + Restarting = "restarting", + Removing = "removing", + Exited = "exited", + Dead = "dead", +} + +export interface ContainerListItem { + /** The first name in Names, not including the initial '/' */ + name: string; + /** The ID of this container */ + id?: string; + /** The name of the image used when creating this container */ + image?: string; + /** The ID of the image that this container was created from */ + image_id?: string; + /** When the container was created */ + created?: I64; + /** The size of files that have been created or changed by this container */ + size_rw?: I64; + /** The total size of all the files in this container */ + size_root_fs?: I64; + /** The state of this container (e.g. `exited`) */ + state: ContainerStateStatusEnum; + /** Additional human-readable status of this container (e.g. `Exit 0`) */ + status?: string; + /** The network mode */ + network_mode?: string; + /** The network names attached to container */ + networks: string[]; + /** The volume names attached to container */ + volumes: string[]; +} + +export type ListDockerContainersResponse = ContainerListItem[]; + +export enum HealthStatusEnum { + Empty = "", + None = "none", + Starting = "starting", + Healthy = "healthy", + Unhealthy = "unhealthy", +} + +/** HealthcheckResult stores information about a single run of a healthcheck probe */ +export interface HealthcheckResult { + /** Date and time at which this check started in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. */ + Start?: string; + /** Date and time at which this check ended in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. */ + End?: string; + /** ExitCode meanings: - `0` healthy - `1` unhealthy - `2` reserved (considered unhealthy) - other values: error running probe */ + ExitCode?: I64; + /** Output from last check */ + Output?: string; +} + +/** Health stores information about the container's healthcheck results. 
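// Usage sketch (illustrative only): grouping a server's ListDockerContainersResponse
// by its ContainerStateStatusEnum state, e.g. to surface how many containers are
// running vs. exited.
function countContainersByState(
  containers: ListDockerContainersResponse,
): Record<string, number> {
  const counts: Record<string, number> = {};
  for (const container of containers) {
    // `state` is a string enum, so it can be used directly as a record key.
    counts[container.state] = (counts[container.state] ?? 0) + 1;
  }
  return counts;
}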
*/ +export interface ContainerHealth { + /** Status is one of `none`, `starting`, `healthy` or `unhealthy` - \"none\" Indicates there is no healthcheck - \"starting\" Starting indicates that the container is not yet ready - \"healthy\" Healthy indicates that the container is running correctly - \"unhealthy\" Unhealthy indicates that the container has a problem */ + Status?: HealthStatusEnum; + /** FailingStreak is the number of consecutive failures */ + FailingStreak?: I64; + /** Log contains the last few results (oldest first) */ + Log?: HealthcheckResult[]; +} + +/** ContainerState stores container's running state. It's part of ContainerJSONBase and will be returned by the \"inspect\" command. */ +export interface ContainerState { + /** String representation of the container state. Can be one of \"created\", \"running\", \"paused\", \"restarting\", \"removing\", \"exited\", or \"dead\". */ + Status?: ContainerStateStatusEnum; + /** Whether this container is running. Note that a running container can be _paused_. The `Running` and `Paused` booleans are not mutually exclusive: When pausing a container (on Linux), the freezer cgroup is used to suspend all processes in the container. Freezing the process requires the process to be running. As a result, paused containers are both `Running` _and_ `Paused`. Use the `Status` field instead to determine if a container's state is \"running\". */ + Running?: boolean; + /** Whether this container is paused. */ + Paused?: boolean; + /** Whether this container is restarting. */ + Restarting?: boolean; + /** Whether a process within this container has been killed because it ran out of memory since the container was last started. */ + OOMKilled?: boolean; + Dead?: boolean; + /** The process ID of this container */ + Pid?: I64; + /** The last exit code of this container */ + ExitCode?: I64; + Error?: string; + /** The time when this container was last started. */ + StartedAt?: string; + /** The time when this container last exited. */ + FinishedAt?: string; + Health?: ContainerHealth; +} + +export type Usize = number; + +export interface ResourcesBlkioWeightDevice { + Path?: string; + Weight?: Usize; +} + +export interface ThrottleDevice { + /** Device path */ + Path?: string; + /** Rate */ + Rate?: I64; +} + +/** A device mapping between the host and container */ +export interface DeviceMapping { + PathOnHost?: string; + PathInContainer?: string; + CgroupPermissions?: string; +} + +/** A request for devices to be sent to device drivers */ +export interface DeviceRequest { + Driver?: string; + Count?: I64; + DeviceIDs?: string[]; + /** A list of capabilities; an OR list of AND lists of capabilities. */ + Capabilities?: string[][]; + /** Driver-specific options, specified as a key/value pairs. These options are passed directly to the driver. */ + Options?: Record; +} + +export interface ResourcesUlimits { + /** Name of ulimit */ + Name?: string; + /** Soft limit */ + Soft?: I64; + /** Hard limit */ + Hard?: I64; +} + +/** The logging configuration for this container */ +export interface HostConfigLogConfig { + Type?: string; + Config?: Record; +} + +/** PortBinding represents a binding between a host IP address and a host port. */ +export interface PortBinding { + /** Host IP address that the container's port is mapped to. */ + HostIp?: string; + /** Host port number that the container's port is mapped to. 
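// Usage sketch (illustrative only): per the ContainerState docs above, `Running`
// and `Paused` are not mutually exclusive, so `Status` is the reliable field for
// deciding whether a container is actively running (and not paused).
function isActivelyRunning(state?: ContainerState): boolean {
  if (!state) {
    return false;
  }
  return state.Status === ContainerStateStatusEnum.Running && !state.Paused;
}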
*/ + HostPort?: string; +} + +export enum RestartPolicyNameEnum { + Empty = "", + No = "no", + Always = "always", + UnlessStopped = "unless-stopped", + OnFailure = "on-failure", +} + +/** The behavior to apply when the container exits. The default is not to restart. An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. */ +export interface RestartPolicy { + /** - Empty string means not to restart - `no` Do not automatically restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container - `on-failure` Restart only when the container exit code is non-zero */ + Name?: RestartPolicyNameEnum; + /** If `on-failure` is used, the number of times to retry before giving up. */ + MaximumRetryCount?: I64; +} + +export enum MountTypeEnum { + Empty = "", + Bind = "bind", + Volume = "volume", + Tmpfs = "tmpfs", + Npipe = "npipe", + Cluster = "cluster", +} + +export enum MountBindOptionsPropagationEnum { + Empty = "", + Private = "private", + Rprivate = "rprivate", + Shared = "shared", + Rshared = "rshared", + Slave = "slave", + Rslave = "rslave", +} + +/** Optional configuration for the `bind` type. */ +export interface MountBindOptions { + /** A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`. */ + Propagation?: MountBindOptionsPropagationEnum; + /** Disable recursive bind mount. */ + NonRecursive?: boolean; + /** Create mount point on host if missing */ + CreateMountpoint?: boolean; + /** Make the mount non-recursively read-only, but still leave the mount recursive (unless NonRecursive is set to `true` in conjunction). Addded in v1.44, before that version all read-only mounts were non-recursive by default. To match the previous behaviour this will default to `true` for clients on versions prior to v1.44. */ + ReadOnlyNonRecursive?: boolean; + /** Raise an error if the mount cannot be made recursively read-only. */ + ReadOnlyForceRecursive?: boolean; +} + +/** Map of driver specific options */ +export interface MountVolumeOptionsDriverConfig { + /** Name of the driver to use to create the volume. */ + Name?: string; + /** key/value map of driver specific options. */ + Options?: Record; +} + +/** Optional configuration for the `volume` type. */ +export interface MountVolumeOptions { + /** Populate volume with data from the target. */ + NoCopy?: boolean; + /** User-defined key/value metadata. */ + Labels?: Record; + DriverConfig?: MountVolumeOptionsDriverConfig; + /** Source path inside the volume. Must be relative without any back traversals. */ + Subpath?: string; +} + +/** Optional configuration for the `tmpfs` type. */ +export interface MountTmpfsOptions { + /** The size for the tmpfs mount in bytes. */ + SizeBytes?: I64; + /** The permission mode for the tmpfs mount in an integer. */ + Mode?: I64; +} + +export interface ContainerMount { + /** Container path. */ + Target?: string; + /** Mount source (e.g. a volume name, a host path). */ + Source?: string; + /** The mount type. Available types: - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. 
- `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. - `cluster` a Swarm cluster volume */ + Type?: MountTypeEnum; + /** Whether the mount should be read-only. */ + ReadOnly?: boolean; + /** The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`. */ + Consistency?: string; + BindOptions?: MountBindOptions; + VolumeOptions?: MountVolumeOptions; + TmpfsOptions?: MountTmpfsOptions; +} + +export enum HostConfigCgroupnsModeEnum { + Empty = "", + Private = "private", + Host = "host", +} + +export enum HostConfigIsolationEnum { + Empty = "", + Default = "default", + Process = "process", + Hyperv = "hyperv", +} + +/** Container configuration that depends on the host we are running on */ +export interface HostConfig { + /** An integer value representing this container's relative CPU weight versus other containers. */ + CpuShares?: I64; + /** Memory limit in bytes. */ + Memory?: I64; + /** Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. */ + CgroupParent?: string; + /** Block IO weight (relative weight). */ + BlkioWeight?: number; + /** Block IO weight (relative device weight) in the form: ``` [{\"Path\": \"device_path\", \"Weight\": weight}] ``` */ + BlkioWeightDevice?: ResourcesBlkioWeightDevice[]; + /** Limit read rate (bytes per second) from a device, in the form: ``` [{\"Path\": \"device_path\", \"Rate\": rate}] ``` */ + BlkioDeviceReadBps?: ThrottleDevice[]; + /** Limit write rate (bytes per second) to a device, in the form: ``` [{\"Path\": \"device_path\", \"Rate\": rate}] ``` */ + BlkioDeviceWriteBps?: ThrottleDevice[]; + /** Limit read rate (IO per second) from a device, in the form: ``` [{\"Path\": \"device_path\", \"Rate\": rate}] ``` */ + BlkioDeviceReadIOps?: ThrottleDevice[]; + /** Limit write rate (IO per second) to a device, in the form: ``` [{\"Path\": \"device_path\", \"Rate\": rate}] ``` */ + BlkioDeviceWriteIOps?: ThrottleDevice[]; + /** The length of a CPU period in microseconds. */ + CpuPeriod?: I64; + /** Microseconds of CPU time that the container can get in a CPU period. */ + CpuQuota?: I64; + /** The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks. */ + CpuRealtimePeriod?: I64; + /** The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks. */ + CpuRealtimeRuntime?: I64; + /** CPUs in which to allow execution (e.g., `0-3`, `0,1`). */ + CpusetCpus?: string; + /** Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. */ + CpusetMems?: string; + /** A list of devices to add to the container. */ + Devices?: DeviceMapping[]; + /** a list of cgroup rules to apply to the container */ + DeviceCgroupRules?: string[]; + /** A list of requests for devices to be sent to device drivers. */ + DeviceRequests?: DeviceRequest[]; + /** Hard limit for kernel TCP buffer memory (in bytes). Depending on the OCI runtime in use, this option may be ignored. It is no longer supported by the default (runc) runtime. This field is omitted when empty. */ + KernelMemoryTCP?: I64; + /** Memory soft limit in bytes. */ + MemoryReservation?: I64; + /** Total memory limit (memory + swap). Set as `-1` to enable unlimited swap. 
*/ + MemorySwap?: I64; + /** Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. */ + MemorySwappiness?: I64; + /** CPU quota in units of 10-9 CPUs. */ + NanoCpus?: I64; + /** Disable OOM Killer for the container. */ + OomKillDisable?: boolean; + /** Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. */ + Init?: boolean; + /** Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. */ + PidsLimit?: I64; + /** A list of resource limits to set in the container. For example: ``` {\"Name\": \"nofile\", \"Soft\": 1024, \"Hard\": 2048} ``` */ + Ulimits?: ResourcesUlimits[]; + /** The number of usable CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. */ + CpuCount?: I64; + /** The usable percentage of the available CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. */ + CpuPercent?: I64; + /** Maximum IOps for the container system drive (Windows only) */ + IOMaximumIOps?: I64; + /** Maximum IO in bytes per second for the container system drive (Windows only). */ + IOMaximumBandwidth?: I64; + /** A list of volume bindings for this container. Each volume binding is a string in one of these forms: - `host-src:container-dest[:options]` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - `volume-name:container-dest[:options]` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. `options` is an optional, comma-delimited list of: - `nocopy` disables automatic copying of data from the container path to the volume. The `nocopy` flag only applies to named volumes. - `[ro|rw]` mounts a volume read-only or read-write, respectively. If omitted or set to `rw`, volumes are mounted read-write. - `[z|Z]` applies SELinux labels to allow or deny multiple containers to read and write to the same volume. - `z`: a _shared_ content label is applied to the content. This label indicates that multiple containers can share the volume content, for both reading and writing. - `Z`: a _private unshared_ label is applied to the content. This label indicates that only the current container can use a private volume. Labeling systems such as SELinux require proper labels to be placed on volume content that is mounted into a container. Without a label, the security system can prevent a container's processes from using the content. By default, the labels set by the host operating system are not modified. - `[[r]shared|[r]slave|[r]private]` specifies mount [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). This only applies to bind-mounted volumes, not internal volumes or named volumes. Mount propagation requires the source mount point (the location where the source directory is mounted in the host operating system) to have the correct propagation properties. For shared volumes, the source mount point must be set to `shared`. For slave volumes, the mount must be set to either `shared` or `slave`. 
*/ + Binds?: string[]; + /** Path to a file where the container ID is written */ + ContainerIDFile?: string; + LogConfig?: HostConfigLogConfig; + /** Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken as a custom network's name to which this container should connect to. */ + NetworkMode?: string; + PortBindings?: Record; + RestartPolicy?: RestartPolicy; + /** Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set. */ + AutoRemove?: boolean; + /** Driver that this container uses to mount volumes. */ + VolumeDriver?: string; + /** A list of volumes to inherit from another container, specified in the form `[:]`. */ + VolumesFrom?: string[]; + /** Specification for mounts to be added to the container. */ + Mounts?: ContainerMount[]; + /** Initial console size, as an `[height, width]` array. */ + ConsoleSize?: number[]; + /** Arbitrary non-identifying metadata attached to container and provided to the runtime when the container is started. */ + Annotations?: Record; + /** A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'. */ + CapAdd?: string[]; + /** A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'. */ + CapDrop?: string[]; + /** cgroup namespace mode for the container. Possible values are: - `\"private\"`: the container runs in its own private cgroup namespace - `\"host\"`: use the host system's cgroup namespace If not specified, the daemon default is used, which can either be `\"private\"` or `\"host\"`, depending on daemon version, kernel support and configuration. */ + CgroupnsMode?: HostConfigCgroupnsModeEnum; + /** A list of DNS servers for the container to use. */ + Dns?: string[]; + /** A list of DNS options. */ + DnsOptions?: string[]; + /** A list of DNS search domains. */ + DnsSearch?: string[]; + /** A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `[\"hostname:IP\"]`. */ + ExtraHosts?: string[]; + /** A list of additional groups that the container process will run as. */ + GroupAdd?: string[]; + /** IPC sharing mode for the container. Possible values are: - `\"none\"`: own private IPC namespace, with /dev/shm not mounted - `\"private\"`: own private IPC namespace - `\"shareable\"`: own private IPC namespace, with a possibility to share it with other containers - `\"container:\"`: join another (shareable) container's IPC namespace - `\"host\"`: use the host system's IPC namespace If not specified, daemon default is used, which can either be `\"private\"` or `\"shareable\"`, depending on daemon version and configuration. */ + IpcMode?: string; + /** Cgroup to use for the container. */ + Cgroup?: string; + /** A list of links for the container in the form `container_name:alias`. */ + Links?: string[]; + /** An integer value containing the score given to the container in order to tune OOM killer preferences. */ + OomScoreAdj?: I64; + /** Set the PID (Process) Namespace mode for the container. It can be either: - `\"container:\"`: joins another container's PID namespace - `\"host\"`: use the host's PID namespace inside the container */ + PidMode?: string; + /** Gives the container full access to the host. */ + Privileged?: boolean; + /** Allocates an ephemeral host port for all of a container's exposed ports. Ports are de-allocated when the container stops and allocated when the container starts. 
The allocated port might be changed when restarting the container. The port is selected from the ephemeral port range that depends on the kernel. For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. */ + PublishAllPorts?: boolean; + /** Mount the container's root filesystem as read only. */ + ReadonlyRootfs?: boolean; + /** A list of string values to customize labels for MLS systems, such as SELinux. */ + SecurityOpt?: string[]; + /** Storage driver options for this container, in the form `{\"size\": \"120G\"}`. */ + StorageOpt?: Record; + /** A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: ``` { \"/run\": \"rw,noexec,nosuid,size=65536k\" } ``` */ + Tmpfs?: Record; + /** UTS namespace to use for the container. */ + UTSMode?: string; + /** Sets the usernamespace mode for the container when usernamespace remapping option is enabled. */ + UsernsMode?: string; + /** Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. */ + ShmSize?: I64; + /** A list of kernel parameters (sysctls) to set in the container. For example: ``` {\"net.ipv4.ip_forward\": \"1\"} ``` */ + Sysctls?: Record; + /** Runtime to use with this container. */ + Runtime?: string; + /** Isolation technology of the container. (Windows only) */ + Isolation?: HostConfigIsolationEnum; + /** The list of paths to be masked inside the container (this overrides the default set of paths). */ + MaskedPaths?: string[]; + /** The list of paths to be set as read-only inside the container (this overrides the default set of paths). */ + ReadonlyPaths?: string[]; +} + +/** MountPoint represents a mount point configuration inside the container. This is used for reporting the mountpoints in use by a container. */ +export interface MountPoint { + /** The mount type: - `bind` a mount of a file or directory from the host into the container. - `volume` a docker volume with the given `Name`. - `tmpfs` a `tmpfs`. - `npipe` a named pipe from the host into the container. - `cluster` a Swarm cluster volume */ + Type?: MountTypeEnum; + /** Name is the name reference to the underlying data defined by `Source` e.g., the volume name. */ + Name?: string; + /** Source location of the mount. For volumes, this contains the storage location of the volume (within `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains the source (host) part of the bind-mount. For `tmpfs` mount points, this field is empty. */ + Source?: string; + /** Destination is the path relative to the container root (`/`) where the `Source` is mounted inside the container. */ + Destination?: string; + /** Driver is the volume driver used to create the volume (if it is a volume). */ + Driver?: string; + /** Mode is a comma separated list of options supplied by the user when creating the bind/volume mount. The default is platform-specific (`\"z\"` on Linux, empty on Windows). */ + Mode?: string; + /** Whether the mount is mounted writable (read-write). */ + RW?: boolean; + /** Propagation describes how mounts are propagated from the host into the mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) for details. This field is not used on Windows. */ + Propagation?: string; +} + +/** EndpointIPAMConfig represents an endpoint's IPAM configuration. 
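// Usage sketch (illustrative only): a partial HostConfig tying together the Binds
// string format, PortBindings map, and RestartPolicy documented above. Assumes
// PortBindings maps a `port/protocol` key to an array of PortBinding, as in the
// Docker Engine API; all names, paths, and ports are arbitrary examples.
const exampleHostConfig: HostConfig = {
  Binds: ["my-volume:/data:rw", "/host/config:/etc/app:ro"],
  PortBindings: {
    "8080/tcp": [{ HostIp: "0.0.0.0", HostPort: "8080" }],
  },
  RestartPolicy: {
    Name: RestartPolicyNameEnum.UnlessStopped,
    MaximumRetryCount: 0,
  },
  NetworkMode: "bridge",
};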
*/ +export interface EndpointIpamConfig { + IPv4Address?: string; + IPv6Address?: string; + LinkLocalIPs?: string[]; +} + +/** Configuration for a network endpoint. */ +export interface EndpointSettings { + IPAMConfig?: EndpointIpamConfig; + Links?: string[]; + /** MAC address for the endpoint on this network. The network driver might ignore this parameter. */ + MacAddress?: string; + Aliases?: string[]; + /** Unique ID of the network. */ + NetworkID?: string; + /** Unique ID for the service endpoint in a Sandbox. */ + EndpointID?: string; + /** Gateway address for this network. */ + Gateway?: string; + /** IPv4 address. */ + IPAddress?: string; + /** Mask length of the IPv4 address. */ + IPPrefixLen?: I64; + /** IPv6 gateway address. */ + IPv6Gateway?: string; + /** Global IPv6 address. */ + GlobalIPv6Address?: string; + /** Mask length of the global IPv6 address. */ + GlobalIPv6PrefixLen?: I64; + /** DriverOpts is a mapping of driver options and values. These options are passed directly to the driver and are driver specific. */ + DriverOpts?: Record; + /** List of all DNS names an endpoint has on a specific network. This list is based on the container name, network aliases, container short ID, and hostname. These DNS names are non-fully qualified but can contain several dots. You can get fully qualified DNS names by appending `.`. For instance, if container name is `my.ctr` and the network is named `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be `my.ctr.testnet`. */ + DNSNames?: string[]; +} + +/** NetworkSettings exposes the network settings in the API */ +export interface NetworkSettings { + /** Name of the default bridge interface when dockerd's --bridge flag is set. */ + Bridge?: string; + /** SandboxID uniquely represents a container's network stack. */ + SandboxID?: string; + Ports?: Record; + /** SandboxKey is the full path of the netns handle */ + SandboxKey?: string; + /** Information about all networks that the container is connected to. */ + Networks?: Record; +} + +export interface Container { + /** The ID of the container */ + Id?: string; + /** The time the container was created */ + Created?: string; + /** The path to the command being run */ + Path?: string; + /** The arguments to the command being run */ + Args?: string[]; + State?: ContainerState; + /** The container's image ID */ + Image?: string; + ResolvConfPath?: string; + HostnamePath?: string; + HostsPath?: string; + LogPath?: string; + Name?: string; + RestartCount?: I64; + Driver?: string; + Platform?: string; + MountLabel?: string; + ProcessLabel?: string; + AppArmorProfile?: string; + /** IDs of exec instances that are running in the container. */ + ExecIDs?: string[]; + HostConfig?: HostConfig; + GraphDriver?: GraphDriverData; + /** The size of files that have been created or changed by this container. */ + SizeRw?: I64; + /** The total size of all the files in this container. */ + SizeRootFs?: I64; + Mounts?: MountPoint[]; + Config?: ContainerConfig; + NetworkSettings?: NetworkSettings; +} + +export type InspectDockerContainerResponse = Container; + +export type GetContainerLogResponse = Log; + +export type SearchContainerLogResponse = Log; + +export enum VolumeScopeEnum { + Empty = "", + Local = "local", + Global = "global", +} + +export interface VolumeListItem { + /** The name of the volume */ + name: string; + driver: string; + mountpoint: string; + created?: string; + scope: VolumeScopeEnum; + /** Amount of disk space used by the volume (in bytes). 
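// Usage sketch (illustrative only): the network names a container is attached to
// can be read off an InspectDockerContainerResponse, since
// `NetworkSettings.Networks` is keyed by network name in the Docker Engine API.
function attachedNetworkNames(container: InspectDockerContainerResponse): string[] {
  return Object.keys(container.NetworkSettings?.Networks ?? {});
}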
This information is only available for volumes created with the `\"local\"` volume driver. For volumes created with other volume drivers, this field is set to `-1` (\"not available\") */ + size?: I64; + /** Whether the volume is currently attached to any container */ + in_use: boolean; +} + +export type ListDockerVolumesResponse = VolumeListItem[]; + +export type U64 = number; + +/** The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. The client must send the version number along with the modified specification when updating these objects. This approach ensures safe concurrency and determinism in that the change on the object may not be applied if the version number has changed from the last read. In other words, if two update requests specify the same base version, only one of the requests can succeed. As a result, two separate update requests that happen at the same time will not unintentionally overwrite each other. */ +export interface ObjectVersion { + Index?: U64; +} + +export enum ClusterVolumeSpecAccessModeScopeEnum { + Empty = "", + Single = "single", + Multi = "multi", +} + +export enum ClusterVolumeSpecAccessModeSharingEnum { + Empty = "", + None = "none", + Readonly = "readonly", + Onewriter = "onewriter", + All = "all", +} + +/** One cluster volume secret entry. Defines a key-value pair that is passed to the plugin. */ +export interface ClusterVolumeSpecAccessModeSecrets { + /** Key is the name of the key of the key-value pair passed to the plugin. */ + Key?: string; + /** Secret is the swarm Secret object from which to read data. This can be a Secret name or ID. The Secret data is retrieved by swarm and used as the value of the key-value pair passed to the plugin. */ + Secret?: string; +} + +export type Topology = Record; + +/** Requirements for the accessible topology of the volume. These fields are optional. For an in-depth description of what these fields mean, see the CSI specification. */ +export interface ClusterVolumeSpecAccessModeAccessibilityRequirements { + /** A list of required topologies, at least one of which the volume must be accessible from. */ + Requisite?: Topology[]; + /** A list of topologies that the volume should attempt to be provisioned in. */ + Preferred?: Topology[]; +} + +/** The desired capacity that the volume should be created with. If empty, the plugin will decide the capacity. */ +export interface ClusterVolumeSpecAccessModeCapacityRange { + /** The volume must be at least this big. The value of 0 indicates an unspecified minimum */ + RequiredBytes?: I64; + /** The volume must not be bigger than this. The value of 0 indicates an unspecified maximum. */ + LimitBytes?: I64; +} + +export enum ClusterVolumeSpecAccessModeAvailabilityEnum { + Empty = "", + Active = "active", + Pause = "pause", + Drain = "drain", +} + +/** Defines how the volume is used by tasks. */ +export interface ClusterVolumeSpecAccessMode { + /** The set of nodes this volume can be used on at one time. - `single` The volume may only be scheduled to one node at a time. - `multi` the volume may be scheduled to any supported number of nodes at a time. */ + Scope?: ClusterVolumeSpecAccessModeScopeEnum; + /** The number and way that different tasks can use this volume at one time. - `none` The volume may only be used by one task at a time. 
- `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. - `all` The volume may have any number of readers and writers. */ + Sharing?: ClusterVolumeSpecAccessModeSharingEnum; + /** Swarm Secrets that are passed to the CSI storage plugin when operating on this volume. */ + Secrets?: ClusterVolumeSpecAccessModeSecrets[]; + AccessibilityRequirements?: ClusterVolumeSpecAccessModeAccessibilityRequirements; + CapacityRange?: ClusterVolumeSpecAccessModeCapacityRange; + /** The availability of the volume for use in tasks. - `active` The volume is fully available for scheduling on the cluster - `pause` No new workloads should use the volume, but existing workloads are not stopped. - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. */ + Availability?: ClusterVolumeSpecAccessModeAvailabilityEnum; +} + +/** Cluster-specific options used to create the volume. */ +export interface ClusterVolumeSpec { + /** Group defines the volume group of this volume. Volumes belonging to the same group can be referred to by group name when creating Services. Referring to a volume by group instructs Swarm to treat volumes in that group interchangeably for the purpose of scheduling. Volumes with an empty string for a group technically all belong to the same, emptystring group. */ + Group?: string; + AccessMode?: ClusterVolumeSpecAccessMode; +} + +/** Information about the global status of the volume. */ +export interface ClusterVolumeInfo { + /** The capacity of the volume in bytes. A value of 0 indicates that the capacity is unknown. */ + CapacityBytes?: I64; + /** A map of strings to strings returned from the storage plugin when the volume is created. */ + VolumeContext?: Record; + /** The ID of the volume as returned by the CSI storage plugin. This is distinct from the volume's ID as provided by Docker. This ID is never used by the user when communicating with Docker to refer to this volume. If the ID is blank, then the Volume has not been successfully created in the plugin yet. */ + VolumeID?: string; + /** The topology this volume is actually accessible from. */ + AccessibleTopology?: Topology[]; +} + +export enum ClusterVolumePublishStatusStateEnum { + Empty = "", + PendingPublish = "pending-publish", + Published = "published", + PendingNodeUnpublish = "pending-node-unpublish", + PendingControllerUnpublish = "pending-controller-unpublish", +} + +export interface ClusterVolumePublishStatus { + /** The ID of the Swarm node the volume is published on. */ + NodeID?: string; + /** The published state of the volume. * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. * `published` The volume is published successfully to the node. * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. */ + State?: ClusterVolumePublishStatusStateEnum; + /** A map of strings to strings returned by the CSI controller plugin when a volume is published. */ + PublishContext?: Record; +} + +/** Options and information specific to, and only present on, Swarm CSI cluster volumes. 
*/ +export interface ClusterVolume { + /** The Swarm ID of this volume. Because cluster volumes are Swarm objects, they have an ID, unlike non-cluster volumes. This ID can be used to refer to the Volume instead of the name. */ + ID?: string; + Version?: ObjectVersion; + CreatedAt?: string; + UpdatedAt?: string; + Spec?: ClusterVolumeSpec; + Info?: ClusterVolumeInfo; + /** The status of the volume as it pertains to its publishing and use on specific nodes */ + PublishStatus?: ClusterVolumePublishStatus[]; +} + +/** Usage details about the volume. This information is used by the `GET /system/df` endpoint, and omitted in other endpoints. */ +export interface VolumeUsageData { + /** Amount of disk space used by the volume (in bytes). This information is only available for volumes created with the `\"local\"` volume driver. For volumes created with other volume drivers, this field is set to `-1` (\"not available\") */ + Size: I64; + /** The number of containers referencing this volume. This field is set to `-1` if the reference-count is not available. */ + RefCount: I64; +} + +export interface Volume { + /** Name of the volume. */ + Name: string; + /** Name of the volume driver used by the volume. */ + Driver: string; + /** Mount path of the volume on the host. */ + Mountpoint: string; + /** Date/Time the volume was created. */ + CreatedAt?: string; + /** Low-level details about the volume, provided by the volume driver. Details are returned as a map with key/value pairs: `{\"key\":\"value\",\"key2\":\"value2\"}`. The `Status` field is optional, and is omitted if the volume driver does not support this feature. */ + Status?: Record>; + /** User-defined key/value metadata. */ + Labels?: Record; + /** The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. */ + Scope?: VolumeScopeEnum; + ClusterVolume?: ClusterVolume; + /** The driver specific options used when creating the volume. */ + Options?: Record; + UsageData?: VolumeUsageData; +} + +export type InspectDockerVolumeResponse = Volume; export interface ComposeProject { /** The compose project name. */ @@ -1535,7 +2360,7 @@ export interface StackConfig { /** * Whether to use https to clone the repo (versus http). Default: true * - * Note. Monitor does not currently support cloning repos via ssh. + * Note. Komodo does not currently support cloning repos via ssh. */ git_https: boolean; /** @@ -1581,7 +2406,7 @@ export interface StackServiceNames { * * 1. The name of the compose project (top level name field of compose file). * This defaults to the name of the parent folder of the compose file. - * Monitor will always set it to be the name of the stack, but imported stacks + * Komodo will always set it to be the name of the stack, but imported stacks * will have a different name. * 2. The service name * 3. The replica number @@ -1602,8 +2427,8 @@ export interface StackInfo { missing_files?: string[]; /** * The deployed project name. - * This is updated whenever Monitor successfully deploys the stack. - * If it is present, Monitor will use it for actions over other options, + * This is updated whenever Komodo successfully deploys the stack. + * If it is present, Komodo will use it for actions over other options, * to ensure control is maintained after changing the project name (there is no rename compose project api). */ deployed_project_name?: string; @@ -1611,7 +2436,7 @@ export interface StackInfo { deployed_hash?: string; /** Deployed commit message, or null. 
Only for repo based stacks */ deployed_message?: string; - /** The deployed compose file contents. This is updated whenever Monitor successfully deploys the stack. */ + /** The deployed compose file contents. This is updated whenever Komodo successfully deploys the stack. */ deployed_contents?: ComposeContents[]; /** * The deployed service names. @@ -1625,7 +2450,7 @@ export interface StackInfo { latest_services?: StackServiceNames[]; /** * The remote compose file contents, whether on host or in repo. - * This is updated whenever Monitor refreshes the stack cache. + * This is updated whenever Komodo refreshes the stack cache. * It will be empty if the file is defined directly in the stack config. */ remote_contents?: ComposeContents[]; @@ -1645,7 +2470,7 @@ export interface StackService { /** The service name */ service: string; /** The container */ - container?: ContainerSummary; + container?: ContainerListItem; } export type ListStackServicesResponse = StackService[]; @@ -1740,7 +2565,7 @@ export interface ResourceSyncConfig { /** * Whether to use https to clone the repo (versus http). Default: true * - * Note. Monitor does not currently support cloning repos via ssh. + * Note. Komodo does not currently support cloning repos via ssh. */ git_https: boolean; /** The Github repo used as the source of the build. */ @@ -1760,7 +2585,7 @@ export interface ResourceSyncConfig { /** * The path of the resource file(s) to sync, relative to the repo root. * Can be a specific file, or a directory containing multiple files / folders. - * See `https://docs.monitor.dev/docs/sync-resources` for more information. + * See [https://komo.do/docs/sync-resources](https://komo.do/docs/sync-resources) for more information. */ resource_path: string; /** @@ -1881,12 +2706,26 @@ export enum Operation { UpdateServer = "UpdateServer", DeleteServer = "DeleteServer", RenameServer = "RenameServer", - PruneImages = "PruneImages", + StartContainer = "StartContainer", + RestartContainer = "RestartContainer", + PauseContainer = "PauseContainer", + UnpauseContainer = "UnpauseContainer", + StopContainer = "StopContainer", + DestroyContainer = "DestroyContainer", + StartAllContainers = "StartAllContainers", + RestartAllContainers = "RestartAllContainers", + PauseAllContainers = "PauseAllContainers", + UnpauseAllContainers = "UnpauseAllContainers", + StopAllContainers = "StopAllContainers", PruneContainers = "PruneContainers", - PruneNetworks = "PruneNetworks", CreateNetwork = "CreateNetwork", DeleteNetwork = "DeleteNetwork", - StopAllContainers = "StopAllContainers", + PruneNetworks = "PruneNetworks", + DeleteImage = "DeleteImage", + PruneImages = "PruneImages", + DeleteVolume = "DeleteVolume", + PruneVolumes = "PruneVolumes", + PruneSystem = "PruneSystem", CreateBuild = "CreateBuild", UpdateBuild = "UpdateBuild", DeleteBuild = "DeleteBuild", @@ -1899,12 +2738,12 @@ export enum Operation { UpdateDeployment = "UpdateDeployment", DeleteDeployment = "DeleteDeployment", Deploy = "Deploy", - StartContainer = "StartContainer", - RestartContainer = "RestartContainer", - PauseContainer = "PauseContainer", - UnpauseContainer = "UnpauseContainer", - StopContainer = "StopContainer", - RemoveContainer = "RemoveContainer", + StartDeployment = "StartDeployment", + RestartDeployment = "RestartDeployment", + PauseDeployment = "PauseDeployment", + UnpauseDeployment = "UnpauseDeployment", + StopDeployment = "StopDeployment", + DestroyDeployment = "DestroyDeployment", RenameDeployment = "RenameDeployment", CreateRepo = "CreateRepo", UpdateRepo = 
"UpdateRepo", @@ -1966,7 +2805,7 @@ export enum UpdateStatus { Complete = "Complete", } -/** Represents an action performed by Monitor. */ +/** Represents an action performed by Komodo. */ export interface Update { /** * The Mongo ID of the update. @@ -2079,6 +2918,15 @@ export interface Variable { description?: string; /** The value associated with the variable. */ value?: string; + /** + * If marked as secret, the variable value will be hidden in updates / logs. + * Additionally the value will not be served in read requests by non admin users. + * + * Note that the value is NOT encrypted in the database, and will likely show up in database logs. + * The security of these variables comes down to the security + * of the database (system level encryption, network isolation, etc.) + */ + is_secret?: boolean; } export type GetVariableResponse = Variable; @@ -2168,6 +3016,8 @@ export type UpdateVariableValueResponse = Variable; export type UpdateVariableDescriptionResponse = Variable; +export type UpdateVariableIsSecretResponse = Variable; + export type DeleteVariableResponse = Variable; export type _PartialAlerterConfig = Partial; @@ -2239,8 +3089,6 @@ export interface DeploymentQuerySpecifics { export type DeploymentQuery = ResourceQuery; -export type U64 = number; - export type MongoDocument = any; export type JsonValue = any; @@ -2312,7 +3160,7 @@ export type _PartialTag = Partial; /** * Non authenticated route to see the available options - * users have to login to monitor, eg. local auth, github, google. + * users have to login to Komodo, eg. local auth, github, google. * Response: [GetLoginOptionsResponse]. */ export interface GetLoginOptions { @@ -2427,7 +3275,7 @@ export interface Deploy { * * 1. Runs `docker start ${container_name}`. */ -export interface StartContainer { +export interface StartDeployment { /** Name or id */ deployment: string; } @@ -2437,7 +3285,7 @@ export interface StartContainer { * * 1. Runs `docker restart ${container_name}`. */ -export interface RestartContainer { +export interface RestartDeployment { /** Name or id */ deployment: string; } @@ -2447,7 +3295,7 @@ export interface RestartContainer { * * 1. Runs `docker pause ${container_name}`. */ -export interface PauseContainer { +export interface PauseDeployment { /** Name or id */ deployment: string; } @@ -2459,7 +3307,7 @@ export interface PauseContainer { * * Note. This is the only way to restart a paused container. */ -export interface UnpauseContainer { +export interface UnpauseDeployment { /** Name or id */ deployment: string; } @@ -2469,7 +3317,7 @@ export interface UnpauseContainer { * * 1. Runs `docker stop ${container_name}`. */ -export interface StopContainer { +export interface StopDeployment { /** Name or id */ deployment: string; /** Override the default termination signal specified in the deployment. */ @@ -2479,12 +3327,12 @@ export interface StopContainer { } /** - * Stops and removes the container for the target deployment. + * Stops and destroys the container for the target deployment. * Reponse: [Update]. * * 1. The container is stopped and removed using `docker container rm ${container_name}`. */ -export interface RemoveContainer { +export interface DestroyDeployment { /** Name or id. */ deployment: string; /** Override the default termination signal specified in the deployment. */ @@ -2559,12 +3407,140 @@ export interface CancelRepoBuild { repo: string; } +/** + * Starts the container on the target server. Response: [Update] + * + * 1. Runs `docker start ${container_name}`. 
+ */ +export interface StartContainer { + /** Name or id */ + server: string; + /** The container name */ + container: string; +} + +/** + * Restarts the container on the target server. Response: [Update] + * + * 1. Runs `docker restart ${container_name}`. + */ +export interface RestartContainer { + /** Name or id */ + server: string; + /** The container name */ + container: string; +} + +/** + * Pauses the container on the target server. Response: [Update] + * + * 1. Runs `docker pause ${container_name}`. + */ +export interface PauseContainer { + /** Name or id */ + server: string; + /** The container name */ + container: string; +} + +/** + * Unpauses the container on the target server. Response: [Update] + * + * 1. Runs `docker unpause ${container_name}`. + * + * Note. This is the only way to restart a paused container. + */ +export interface UnpauseContainer { + /** Name or id */ + server: string; + /** The container name */ + container: string; +} + +/** + * Stops the container on the target server. Response: [Update] + * + * 1. Runs `docker stop ${container_name}`. + */ +export interface StopContainer { + /** Name or id */ + server: string; + /** The container name */ + container: string; + /** Override the default termination signal. */ + signal?: TerminationSignal; + /** Override the default termination max time. */ + time?: number; +} + +/** + * Stops and destroys the container on the target server. + * Reponse: [Update]. + * + * 1. The container is stopped and removed using `docker container rm ${container_name}`. + */ +export interface DestroyContainer { + /** Name or id */ + server: string; + /** The container name */ + container: string; + /** Override the default termination signal. */ + signal?: TerminationSignal; + /** Override the default termination max time. */ + time?: number; +} + +/** Starts all containers on the target server. Response: [Update] */ +export interface StartAllContainers { + /** Name or id */ + server: string; +} + +/** Restarts all containers on the target server. Response: [Update] */ +export interface RestartAllContainers { + /** Name or id */ + server: string; +} + +/** Pauses all containers on the target server. Response: [Update] */ +export interface PauseAllContainers { + /** Name or id */ + server: string; +} + +/** Unpauses all containers on the target server. Response: [Update] */ +export interface UnpauseAllContainers { + /** Name or id */ + server: string; +} + /** Stops all containers on the target server. Response: [Update] */ export interface StopAllContainers { /** Name or id */ server: string; } +/** + * Prunes the docker containers on the target server. Response: [Update]. + * + * 1. Runs `docker container prune -f`. + */ +export interface PruneContainers { + /** Id or name */ + server: string; +} + +/** + * Delete a docker network. + * Response: [Update] + */ +export interface DeleteNetwork { + /** Id or name. */ + server: string; + /** The name of the network to delete. */ + name: string; +} + /** * Prunes the docker networks on the target server. Response: [Update]. * @@ -2575,6 +3551,17 @@ export interface PruneNetworks { server: string; } +/** + * Delete a docker image. + * Response: [Update] + */ +export interface DeleteImage { + /** Id or name. */ + server: string; + /** The name of the image to delete. */ + name: string; +} + /** * Prunes the docker images on the target server. Response: [Update]. * @@ -2586,11 +3573,32 @@ export interface PruneImages { } /** - * Prunes the docker containers on the target server. 
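// Usage sketch (illustrative only): the server-level container and image executions
// above target a server plus a raw docker object name, unlike the deployment-scoped
// variants. The server, container, and image names are arbitrary examples.
const stopOrphan: StopContainer = {
  server: "prod-1",
  container: "orphaned-container",
};
const deleteOldImage: DeleteImage = {
  server: "prod-1",
  name: "nginx:1.19",
};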
Response: [Update]. - * - * 1. Runs `docker container prune -f`. + * Delete a docker volume. + * Response: [Update] */ -export interface PruneContainers { +export interface DeleteVolume { + /** Id or name. */ + server: string; + /** The name of the volume to delete. */ + name: string; +} + +/** + * Prunes the docker volumes on the target server. Response: [Update]. + * + * 1. Runs `docker volume prune -a -f`. + */ +export interface PruneVolumes { + /** Id or name */ + server: string; +} + +/** + * Prunes the docker system on the target server, including volumes. Response: [Update]. + * + * 1. Runs `docker system prune -a -f --volumes`. + */ +export interface PruneSystem { /** Id or name */ server: string; } @@ -2800,7 +3808,7 @@ export interface GetBuildsSummary { /** Response for [GetBuildsSummary]. */ export interface GetBuildsSummaryResponse { - /** The total number of builds in monitor. */ + /** The total number of builds in Komodo. */ total: number; /** The number of builds with Ok state. */ ok: number; @@ -2954,7 +3962,7 @@ export interface GetDeploymentContainer { /** Response for [GetDeploymentContainer]. */ export interface GetDeploymentContainerResponse { state: DeploymentState; - container?: ContainerSummary; + container?: ContainerListItem; } /** @@ -2963,7 +3971,7 @@ export interface GetDeploymentContainerResponse { * * Note. This call will hit the underlying server directly for most up to date log. */ -export interface GetLog { +export interface GetDeploymentLog { /** Id or name */ deployment: string; /** @@ -2985,7 +3993,7 @@ export enum SearchCombinator { * * Note. This call will hit the underlying server directly for most up to date log. */ -export interface SearchLog { +export interface SearchDeploymentLog { /** Id or name */ deployment: string; /** The terms to search for. */ @@ -3047,7 +4055,7 @@ export interface ListCommonDeploymentExtraArgs { } /** - * Get the version of the core api. + * Get the version of the Komodo Core api. * Response: [GetVersionResponse]. */ export interface GetVersion { @@ -3374,7 +4382,7 @@ export interface GetServerActionState { } /** - * Get the version of the monitor periphery agent on the target server. + * Get the version of the Komodo Periphery agent on the target server. * Response: [GetPeripheryVersionResponse]. */ export interface GetPeripheryVersion { @@ -3394,6 +4402,14 @@ export interface ListDockerNetworks { server: string; } +/** Inspect a docker network on the server. Response: [InspectDockerNetworkResponse]. */ +export interface InspectDockerNetwork { + /** Id or name */ + server: string; + /** The network name */ + network: string; +} + /** * List the docker images locally cached on the target server. * Response: [ListDockerImagesResponse]. @@ -3403,6 +4419,22 @@ export interface ListDockerImages { server: string; } +/** Inspect a docker image on the server. Response: [Image]. */ +export interface InspectDockerImage { + /** Id or name */ + server: string; + /** The image name */ + image: string; +} + +/** Get image history from the server. Response: [ListDockerImageHistoryResponse]. */ +export interface ListDockerImageHistory { + /** Id or name */ + server: string; + /** The image name */ + image: string; +} + /** * List all docker containers on the target server. * Response: [ListDockerContainersResponse]. @@ -3412,8 +4444,88 @@ export interface ListDockerContainers { server: string; } +/** Inspect a docker container on the server. Response: [Container]. 
*/ +export interface InspectDockerContainer { + /** Id or name */ + server: string; + /** The container name */ + container: string; +} + /** - * List all compose projects on the target server. + * Get the container log's tail, split by stdout/stderr. + * Response: [Log]. + * + * Note. This call will hit the underlying server directly for most up to date log. + */ +export interface GetContainerLog { + /** Id or name */ + server: string; + /** The container name */ + container: string; + /** + * The number of lines of the log tail to include. + * Default: 100. + * Max: 5000. + */ + tail: U64; +} + +/** + * Search the container log's tail using `grep`. All lines go to stdout. + * Response: [Log]. + * + * Note. This call will hit the underlying server directly for most up to date log. + */ +export interface SearchContainerLog { + /** Id or name */ + server: string; + /** The container name */ + container: string; + /** The terms to search for. */ + terms: string[]; + /** + * When searching for multiple terms, can use `AND` or `OR` combinator. + * + * - `AND`: Only include lines with **all** terms present in that line. + * - `OR`: Include lines that have one or more matches in the terms. + */ + combinator?: SearchCombinator; + /** Invert the results, ie return all lines that DON'T match the terms / combinator. */ + invert?: boolean; +} + +/** Inspect a docker container on the server. Response: [Container]. */ +export interface GetResourceMatchingContainer { + /** Id or name */ + server: string; + /** The container name */ + container: string; +} + +export interface GetResourceMatchingContainerResponse { + resource?: ResourceTarget; +} + +/** + * List all docker volumes on the target server. + * Response: [ListDockerVolumesResponse]. + */ +export interface ListDockerVolumes { + /** Id or name */ + server: string; +} + +/** Inspect a docker volume on the server. Response: [Volume]. */ +export interface InspectDockerVolume { + /** Id or name */ + server: string; + /** The volume name */ + volume: string; +} + +/** + * List all docker compose projects on the target server. * Response: [ListComposeProjectsResponse]. */ export interface ListComposeProjects { @@ -3802,7 +4914,7 @@ export interface ListUpdates { page?: number; } -/** Minimal representation of an action performed by Monitor. */ +/** Minimal representation of an action performed by Komodo. */ export interface UpdateListItem { /** The id of the update */ id: string; @@ -3876,7 +4988,7 @@ export interface FindUser { /** * **Admin only.** - * Gets list of monitor users. + * Gets list of Komodo users. * Response: [ListUsersResponse] */ export interface ListUsers { @@ -3920,6 +5032,9 @@ export interface ListUserGroups { /** * List all available global variables. * Response: [Variable] + * + * Note. For non admin users making this call, + * secret variables will have their values obscured. */ export interface GetVariable { /** The name of the variable to get. */ @@ -3929,6 +5044,9 @@ export interface GetVariable { /** * List all available global variables. * Response: [ListVariablesResponse] + * + * Note. For non admin users making this call, + * secret variables will have their values obscured. */ export interface ListVariables { } @@ -4444,7 +5562,7 @@ export enum RepoWebhookAction { } /** - * Create a webhook on the github repo attached to the (monitor) repo + * Create a webhook on the github repo attached to the (Komodo) Repo resource. * passed in request. 
Response: [CreateRepoWebhookResponse] */ export interface CreateRepoWebhook { @@ -4455,7 +5573,7 @@ export interface CreateRepoWebhook { } /** - * Delete the webhook on the github repo attached to the (monitor) repo + * Delete the webhook on the github repo attached to the (Komodo) Repo resource. * passed in request. Response: [DeleteRepoWebhookResponse] */ export interface DeleteRepoWebhook { @@ -4518,17 +5636,6 @@ export interface CreateNetwork { name: string; } -/** - * Delete a docker network. - * Response: [Update] - */ -export interface DeleteNetwork { - /** Id or name. */ - server: string; - /** The name of the network to delete. */ - name: string; -} - export type PartialServerTemplateConfig = | { type: "Aws", params: _PartialAwsServerTemplateConfig } | { type: "Hetzner", params: _PartialHetznerServerTemplateConfig }; @@ -4861,9 +5968,11 @@ export interface CreateVariable { value?: string; /** The initial value of the description. default: "". */ description?: string; + /** Whether to make this a secret variable. */ + is_secret?: boolean; } -/** **Admin only.** Update variable. Response: [Variable]. */ +/** **Admin only.** Update variable value. Response: [Variable]. */ export interface UpdateVariableValue { /** The name of the variable to update. */ name: string; @@ -4871,7 +5980,7 @@ export interface UpdateVariableValue { value: string; } -/** **Admin only.** Update variable. Response: [Variable]. */ +/** **Admin only.** Update variable description. Response: [Variable]. */ export interface UpdateVariableDescription { /** The name of the variable to update. */ name: string; @@ -4879,6 +5988,14 @@ export interface UpdateVariableDescription { description: string; } +/** **Admin only.** Update whether variable is secret. Response: [Variable]. */ +export interface UpdateVariableIsSecret { + /** The name of the variable to update. */ + name: string; + /** Whether variable is secret. */ + is_secret: boolean; +} + /** **Admin only.** Delete a variable. Response: [Variable]. */ export interface DeleteVariable { name: string; @@ -4909,7 +6026,7 @@ export interface StandardRegistryConfig { organization?: string; } -/** Configuration for a monitor server builder. */ +/** Configuration for a Komodo Server Builder. */ export interface ServerBuilderConfig { /** The server id of the builder */ server_id: string; @@ -4961,6 +6078,29 @@ export interface AwsBuilderConfig { secrets?: string[]; } +export interface NameAndId { + name: string; + id: string; +} + +export enum PortTypeEnum { + EMPTY = "", + TCP = "tcp", + UDP = "udp", + SCTP = "sctp", +} + +/** An open port on a container */ +export interface Port { + /** Host IP address that the container's port is mapped to */ + IP?: string; + /** Port on the container */ + PrivatePort?: number; + /** Port exposed on the host */ + PublicPort?: number; + Type?: PortTypeEnum; +} + export interface LatestCommit { hash: string; message: string; @@ -4989,14 +6129,6 @@ export interface CloneArgs { account?: string; } -/** Info for the all system disks combined. */ -export interface TotalDiskUsage { - /** Used portion in GB */ - used_gb: number; - /** Total size in GB */ - total_gb: number; -} - /** Summary of the health of the server. 
*/ export interface ServerHealth { cpu: SeverityLevel; @@ -5069,6 +6201,7 @@ export enum HetznerDatacenter { Falkenstein1Dc14 = "Falkenstein1Dc14", AshburnDc1 = "AshburnDc1", HillsboroDc1 = "HillsboroDc1", + SingaporeDc1 = "SingaporeDc1", } export enum HetznerServerType { @@ -5178,6 +6311,14 @@ export interface ComposeFile { services?: Record; } +/** Info for the all system disks combined. */ +export interface TotalDiskUsage { + /** Used portion in GB */ + used_gb: number; + /** Total size in GB */ + total_gb: number; +} + export interface SyncDeployUpdate { /** Resources to deploy */ to_deploy: number; @@ -5237,17 +6378,32 @@ export type AuthRequest = | { type: "GetUser", params: GetUser }; export type ExecuteRequest = - | { type: "StopAllContainers", params: StopAllContainers } - | { type: "PruneContainers", params: PruneContainers } - | { type: "PruneImages", params: PruneImages } - | { type: "PruneNetworks", params: PruneNetworks } - | { type: "Deploy", params: Deploy } | { type: "StartContainer", params: StartContainer } | { type: "RestartContainer", params: RestartContainer } | { type: "PauseContainer", params: PauseContainer } | { type: "UnpauseContainer", params: UnpauseContainer } | { type: "StopContainer", params: StopContainer } - | { type: "RemoveContainer", params: RemoveContainer } + | { type: "DestroyContainer", params: DestroyContainer } + | { type: "StartAllContainers", params: StartAllContainers } + | { type: "RestartAllContainers", params: RestartAllContainers } + | { type: "PauseAllContainers", params: PauseAllContainers } + | { type: "UnpauseAllContainers", params: UnpauseAllContainers } + | { type: "StopAllContainers", params: StopAllContainers } + | { type: "PruneContainers", params: PruneContainers } + | { type: "DeleteNetwork", params: DeleteNetwork } + | { type: "PruneNetworks", params: PruneNetworks } + | { type: "DeleteImage", params: DeleteImage } + | { type: "PruneImages", params: PruneImages } + | { type: "DeleteVolume", params: DeleteVolume } + | { type: "PruneVolumes", params: PruneVolumes } + | { type: "PruneSystem", params: PruneSystem } + | { type: "Deploy", params: Deploy } + | { type: "StartDeployment", params: StartDeployment } + | { type: "RestartDeployment", params: RestartDeployment } + | { type: "PauseDeployment", params: PauseDeployment } + | { type: "UnpauseDeployment", params: UnpauseDeployment } + | { type: "StopDeployment", params: StopDeployment } + | { type: "DestroyDeployment", params: DestroyDeployment } | { type: "DeployStack", params: DeployStack } | { type: "StartStack", params: StartStack } | { type: "RestartStack", params: RestartStack } @@ -5300,17 +6456,26 @@ export type ReadRequest = | { type: "GetHistoricalServerStats", params: GetHistoricalServerStats } | { type: "ListServers", params: ListServers } | { type: "ListFullServers", params: ListFullServers } + | { type: "InspectDockerContainer", params: InspectDockerContainer } + | { type: "GetResourceMatchingContainer", params: GetResourceMatchingContainer } + | { type: "GetContainerLog", params: GetContainerLog } + | { type: "SearchContainerLog", params: SearchContainerLog } + | { type: "InspectDockerNetwork", params: InspectDockerNetwork } + | { type: "InspectDockerImage", params: InspectDockerImage } + | { type: "ListDockerImageHistory", params: ListDockerImageHistory } + | { type: "InspectDockerVolume", params: InspectDockerVolume } | { type: "ListDockerContainers", params: ListDockerContainers } | { type: "ListDockerNetworks", params: ListDockerNetworks } | { type: 
"ListDockerImages", params: ListDockerImages } + | { type: "ListDockerVolumes", params: ListDockerVolumes } | { type: "ListComposeProjects", params: ListComposeProjects } | { type: "GetDeploymentsSummary", params: GetDeploymentsSummary } | { type: "GetDeployment", params: GetDeployment } | { type: "GetDeploymentContainer", params: GetDeploymentContainer } | { type: "GetDeploymentActionState", params: GetDeploymentActionState } | { type: "GetDeploymentStats", params: GetDeploymentStats } - | { type: "GetLog", params: GetLog } - | { type: "SearchLog", params: SearchLog } + | { type: "GetDeploymentLog", params: GetDeploymentLog } + | { type: "SearchDeploymentLog", params: SearchDeploymentLog } | { type: "ListDeployments", params: ListDeployments } | { type: "ListFullDeployments", params: ListFullDeployments } | { type: "ListCommonDeploymentExtraArgs", params: ListCommonDeploymentExtraArgs } @@ -5397,7 +6562,6 @@ export type WriteRequest = | { type: "UpdateServer", params: UpdateServer } | { type: "RenameServer", params: RenameServer } | { type: "CreateNetwork", params: CreateNetwork } - | { type: "DeleteNetwork", params: DeleteNetwork } | { type: "CreateDeployment", params: CreateDeployment } | { type: "CopyDeployment", params: CopyDeployment } | { type: "DeleteDeployment", params: DeleteDeployment } @@ -5455,6 +6619,7 @@ export type WriteRequest = | { type: "CreateVariable", params: CreateVariable } | { type: "UpdateVariableValue", params: UpdateVariableValue } | { type: "UpdateVariableDescription", params: UpdateVariableDescription } + | { type: "UpdateVariableIsSecret", params: UpdateVariableIsSecret } | { type: "DeleteVariable", params: DeleteVariable } | { type: "CreateGitProviderAccount", params: CreateGitProviderAccount } | { type: "UpdateGitProviderAccount", params: UpdateGitProviderAccount } diff --git a/client/periphery/rs/Cargo.toml b/client/periphery/rs/Cargo.toml index 3a0ca6052..00810f81a 100644 --- a/client/periphery/rs/Cargo.toml +++ b/client/periphery/rs/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true [dependencies] # local -monitor_client.workspace = true +komodo_client.workspace = true # mogh serror.workspace = true resolver_api.workspace = true diff --git a/client/periphery/rs/src/api/build.rs b/client/periphery/rs/src/api/build.rs index 9018bf9c4..87d48918b 100644 --- a/client/periphery/rs/src/api/build.rs +++ b/client/periphery/rs/src/api/build.rs @@ -1,6 +1,5 @@ -use monitor_client::entities::{ - config::core::AwsEcrConfig, server::docker_image::ImageSummary, - update::Log, +use komodo_client::entities::{ + config::core::AwsEcrConfig, update::Log, }; use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; @@ -8,7 +7,7 @@ use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, Clone, Request)] #[response(BuildResponse)] pub struct Build { - pub build: monitor_client::entities::build::Build, + pub build: komodo_client::entities::build::Build, /// Override registry token with one sent from core. 
pub registry_token: Option, /// Propogate AwsEcrConfig from core @@ -22,21 +21,3 @@ pub struct Build { } pub type BuildResponse = Vec; - -// - -#[derive(Serialize, Deserialize, Debug, Clone, Request)] -#[response(GetImageListResponse)] -pub struct GetImageList {} - -pub type GetImageListResponse = Vec; - -// - -#[derive(Serialize, Deserialize, Debug, Clone, Request)] -#[response(PruneImagesResponse)] -pub struct PruneImages {} - -pub type PruneImagesResponse = Log; - -// diff --git a/client/periphery/rs/src/api/compose.rs b/client/periphery/rs/src/api/compose.rs index 28c7e5993..1e23c6866 100644 --- a/client/periphery/rs/src/api/compose.rs +++ b/client/periphery/rs/src/api/compose.rs @@ -1,4 +1,4 @@ -use monitor_client::entities::{ +use komodo_client::entities::{ stack::{ComposeContents, ComposeProject, Stack}, update::Log, SearchCombinator, @@ -89,6 +89,9 @@ pub struct ComposeUp { pub git_token: Option, /// If provided, use it to login in. Otherwise check periphery local registries. pub registry_token: Option, + /// Propogate any secret replacers from core interpolation. + #[serde(default)] + pub replacers: Vec<(String, String)>, } #[derive(Debug, Clone, Default, Serialize, Deserialize)] diff --git a/client/periphery/rs/src/api/container.rs b/client/periphery/rs/src/api/container.rs index 63726faf6..dfff43742 100644 --- a/client/periphery/rs/src/api/container.rs +++ b/client/periphery/rs/src/api/container.rs @@ -1,18 +1,20 @@ -use monitor_client::entities::{ +use komodo_client::entities::{ config::core::AwsEcrConfig, - deployment::{ - ContainerSummary, Deployment, DockerContainerStats, - TerminationSignal, - }, + deployment::Deployment, + docker::container::{Container, ContainerStats}, update::Log, - SearchCombinator, + SearchCombinator, TerminationSignal, }; use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; +// + #[derive(Serialize, Deserialize, Debug, Clone, Request)] -#[response(Vec)] -pub struct GetContainerList {} +#[response(Container)] +pub struct InspectContainer { + pub name: String, +} // @@ -44,7 +46,7 @@ pub struct GetContainerLogSearch { // #[derive(Serialize, Deserialize, Debug, Clone, Request)] -#[response(DockerContainerStats)] +#[response(ContainerStats)] pub struct GetContainerStats { pub name: String, } @@ -52,11 +54,15 @@ pub struct GetContainerStats { // #[derive(Serialize, Deserialize, Debug, Clone, Request)] -#[response(Vec)] +#[response(Vec)] pub struct GetContainerStatsList {} // +// ======= +// ACTIONS +// ======= + #[derive(Serialize, Deserialize, Debug, Clone, Request)] #[response(Log)] pub struct Deploy { @@ -116,12 +122,6 @@ pub struct StopContainer { // -#[derive(Serialize, Deserialize, Debug, Clone, Request)] -#[response(Vec)] -pub struct StopAllContainers {} - -// - #[derive(Serialize, Deserialize, Debug, Clone, Request)] #[response(Log)] pub struct RemoveContainer { @@ -146,3 +146,31 @@ pub struct RenameContainer { pub struct PruneContainers {} // + +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Vec)] +pub struct StartAllContainers {} + +// + +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Vec)] +pub struct RestartAllContainers {} + +// + +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Vec)] +pub struct PauseAllContainers {} + +// + +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Vec)] +pub struct UnpauseAllContainers {} + +// + +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Vec)] +pub struct StopAllContainers {} diff --git 
a/client/periphery/rs/src/api/git.rs b/client/periphery/rs/src/api/git.rs index 306cfcc95..49ae50df0 100644 --- a/client/periphery/rs/src/api/git.rs +++ b/client/periphery/rs/src/api/git.rs @@ -1,6 +1,6 @@ use std::path::PathBuf; -use monitor_client::entities::{ +use komodo_client::entities::{ update::Log, CloneArgs, EnvironmentVar, LatestCommit, SystemCommand, }; use resolver_api::derive::Request; @@ -24,6 +24,9 @@ pub struct CloneRepo { pub skip_secret_interp: bool, /// Override git token with one sent from core. pub git_token: Option, + /// Propogate any secret replacers from core interpolation. + #[serde(default)] + pub replacers: Vec<(String, String)>, } fn default_env_file_path() -> String { @@ -45,42 +48,15 @@ pub struct PullRepo { pub env_file_path: String, #[serde(default)] pub skip_secret_interp: bool, -} - -// - -/// Backward compat adapter for v1.13 upgrade. -#[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(untagged)] -pub enum RepoActionResponse { - V1_13(RepoActionResponseV1_13), - V1_12(Vec), -} - -impl From for RepoActionResponseV1_13 { - fn from(value: RepoActionResponse) -> Self { - match value { - RepoActionResponse::V1_13(response) => response, - RepoActionResponse::V1_12(logs) => RepoActionResponseV1_13 { - logs, - commit_hash: None, - commit_message: None, - env_file_path: None, - }, - } - } -} - -impl From for RepoActionResponse { - fn from(value: RepoActionResponseV1_13) -> Self { - RepoActionResponse::V1_13(value) - } + /// Propogate any secret replacers from core interpolation. + #[serde(default)] + pub replacers: Vec<(String, String)>, } // #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct RepoActionResponseV1_13 { +pub struct RepoActionResponse { pub logs: Vec, pub commit_hash: Option, pub commit_message: Option, diff --git a/client/periphery/rs/src/api/image.rs b/client/periphery/rs/src/api/image.rs new file mode 100644 index 000000000..1d93c3e4d --- /dev/null +++ b/client/periphery/rs/src/api/image.rs @@ -0,0 +1,37 @@ +use komodo_client::entities::{ + docker::image::{Image, ImageHistoryResponseItem}, + update::Log, +}; +use resolver_api::derive::Request; +use serde::{Deserialize, Serialize}; + +// + +#[derive(Debug, Clone, Serialize, Deserialize, Request)] +#[response(Image)] +pub struct InspectImage { + pub name: String, +} + +// + +#[derive(Debug, Clone, Serialize, Deserialize, Request)] +#[response(Vec)] +pub struct ImageHistory { + pub name: String, +} + +// + +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Log)] +pub struct DeleteImage { + /// Id or name + pub name: String, +} + +// + +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Log)] +pub struct PruneImages {} diff --git a/client/periphery/rs/src/api/mod.rs b/client/periphery/rs/src/api/mod.rs index b3b256482..dade628f6 100644 --- a/client/periphery/rs/src/api/mod.rs +++ b/client/periphery/rs/src/api/mod.rs @@ -1,8 +1,8 @@ -use monitor_client::entities::{ +use komodo_client::entities::{ config::{DockerRegistry, GitProvider}, - deployment::ContainerSummary, - server::{ - docker_image::ImageSummary, docker_network::DockerNetwork, + docker::{ + container::ContainerListItem, image::ImageListItem, + network::NetworkListItem, volume::VolumeListItem, }, stack::ComposeProject, update::Log, @@ -16,8 +16,10 @@ pub mod build; pub mod compose; pub mod container; pub mod git; +pub mod image; pub mod network; pub mod stats; +pub mod volume; // @@ -46,9 +48,10 @@ pub struct GetDockerLists {} #[derive(Serialize, Deserialize, Debug, Clone)] pub struct 
GetDockerListsResponse { - pub containers: Result, Serror>, - pub networks: Result, Serror>, - pub images: Result, Serror>, + pub containers: Result, Serror>, + pub networks: Result, Serror>, + pub images: Result, Serror>, + pub volumes: Result, Serror>, pub projects: Result, Serror>, } diff --git a/client/periphery/rs/src/api/network.rs b/client/periphery/rs/src/api/network.rs index afec5a0e6..67b76547e 100644 --- a/client/periphery/rs/src/api/network.rs +++ b/client/periphery/rs/src/api/network.rs @@ -1,12 +1,16 @@ -use monitor_client::entities::{ - server::docker_network::DockerNetwork, update::Log, +use komodo_client::entities::{ + docker::network::Network, update::Log, }; use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; +// + #[derive(Serialize, Deserialize, Debug, Clone, Request)] -#[response(Vec)] -pub struct GetNetworkList {} +#[response(Network)] +pub struct InspectNetwork { + pub name: String, +} // @@ -22,6 +26,7 @@ pub struct CreateNetwork { #[derive(Serialize, Deserialize, Debug, Clone, Request)] #[response(Log)] pub struct DeleteNetwork { + /// Id or name pub name: String, } diff --git a/client/periphery/rs/src/api/stats.rs b/client/periphery/rs/src/api/stats.rs index bd3c7880e..821335fe9 100644 --- a/client/periphery/rs/src/api/stats.rs +++ b/client/periphery/rs/src/api/stats.rs @@ -1,9 +1,11 @@ -use monitor_client::entities::server::stats::{ +use komodo_client::entities::stats::{ SystemInformation, SystemProcess, SystemStats, }; use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; +// + #[derive(Serialize, Deserialize, Debug, Clone, Request)] #[response(SystemInformation)] pub struct GetSystemInformation {} diff --git a/client/periphery/rs/src/api/volume.rs b/client/periphery/rs/src/api/volume.rs new file mode 100644 index 000000000..39ac64084 --- /dev/null +++ b/client/periphery/rs/src/api/volume.rs @@ -0,0 +1,26 @@ +use komodo_client::entities::{docker::volume::Volume, update::Log}; +use resolver_api::derive::Request; +use serde::{Deserialize, Serialize}; + +// + +#[derive(Debug, Clone, Serialize, Deserialize, Request)] +#[response(Volume)] +pub struct InspectVolume { + pub name: String, +} + +// + +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Log)] +pub struct DeleteVolume { + /// Id or name + pub name: String, +} + +// + +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Log)] +pub struct PruneVolumes {} diff --git a/config_example/aio.compose.yaml b/config_example/aio.compose.yaml index fef8891e8..becca9185 100644 --- a/config_example/aio.compose.yaml +++ b/config_example/aio.compose.yaml @@ -6,76 +6,80 @@ ## A "default" server pointing to the local Periphery will be waiting in the UI on first startup. services: - monitor-core: - image: ghcr.io/mbecker20/monitor:latest ## use ghcr.io/mbecker20/monitor:latest-aarch64 for arm support + komodo-core: + image: ghcr.io/mbecker20/komodo:latest ## use ghcr.io/mbecker20/komodo:latest-aarch64 for arm support restart: unless-stopped depends_on: - - monitor-mongo + - komodo-mongo logging: driver: local # enable log rotation by default. see `https://docs.docker.com/config/containers/logging/local/` networks: - - monitor-network + - komodo-network ports: - 9120:9120 - environment: # https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml - MONITOR_HOST: https://demo.monitor.dev # CHANGEME - MONITOR_TITLE: Monitor # Change the app title, displayed in the browser tab. 
- MONITOR_ENSURE_SERVER: http://monitor-periphery:8120 # Created the "default" server. + environment: # https://github.com/mbecker20/komodo/blob/main/config_example/core.config.example.toml + KOMODO_HOST: https://demo.komo.do # CHANGEME + KOMODO_TITLE: Komodo # Change the app title, displayed in the browser tab. + KOMODO_ENSURE_SERVER: http://komodo-periphery:8120 # Creates the "default" server. ## MONGO - MONITOR_MONGO_ADDRESS: monitor-mongo:27017 - MONITOR_MONGO_USERNAME: admin # match db credentials ones below - MONITOR_MONGO_PASSWORD: admin + KOMODO_MONGO_ADDRESS: komodo-mongo:27017 + KOMODO_MONGO_USERNAME: admin # match db credentials ones below + KOMODO_MONGO_PASSWORD: admin ## KEYS - MONITOR_PASSKEY: a_random_passkey # used to auth against periphery - MONITOR_WEBHOOK_SECRET: a_random_secret # used to authenticate incoming webhooks - MONITOR_JWT_SECRET: a_random_jwt_secret # Optional. If empty, will have to log in again on restart. + KOMODO_PASSKEY: a_random_passkey # used to auth against periphery + KOMODO_WEBHOOK_SECRET: a_random_secret # used to authenticate incoming webhooks + KOMODO_JWT_SECRET: a_random_jwt_secret # Optional. If empty, will have to log in again on restart. ## AUTH - MONITOR_LOCAL_AUTH: true # the default is false. - # MONITOR_GITHUB_OAUTH_ENABLED: true # also support google oauth - # MONITOR_GITHUB_OAUTH_ID: your_oauth_id - # MONITOR_GITHUB_OAUTH_SECRET: your_oauth_secret + KOMODO_LOCAL_AUTH: true # the default is false. + # KOMODO_GITHUB_OAUTH_ENABLED: true # also support google oauth + # KOMODO_GITHUB_OAUTH_ID: your_oauth_id + # KOMODO_GITHUB_OAUTH_SECRET: your_oauth_secret ## AWS - # MONITOR_AWS_ACCESS_KEY_ID: your_aws_key_id - # MONITOR_AWS_SECRET_ACCESS_KEY: your_secret_access_key + # KOMODO_AWS_ACCESS_KEY_ID: your_aws_key_id + # KOMODO_AWS_SECRET_ACCESS_KEY: your_secret_access_key ## HETZNER - # MONITOR_HETZNER_TOKEN: your_hetzner_token + # KOMODO_HETZNER_TOKEN: your_hetzner_token ## Deploy periphery container using this block, - ## or deploy it on the host directly using https://github.com/mbecker20/monitor/tree/main/scripts - monitor-periphery: + ## or deploy it on the host directly using https://github.com/mbecker20/komodo/tree/main/scripts + komodo-periphery: image: ghcr.io/mbecker20/periphery:latest # use ghcr.io/mbecker20/periphery:latest-aarch64 for arm support logging: driver: local networks: - - monitor-network + - komodo-network volumes: - /var/run/docker.sock:/var/run/docker.sock - - monitor-repos:/etc/monitor/repos # manage repos in a docker volume, or change it to an accessible host directory. + - komodo-repos:/etc/komodo/repos # manage repos in a docker volume, or change it to an accessible host directory. + - komodo-stacks:/etc/komodo/stacks # manage stack files in a docker volume, or change it to an accessible host directory. # environment: # # If the disk size is overreporting, can use one of these to # # whitelist / blacklist the disks to filter them, whichever is easier. 
- # PERIPHERY_INCLUDE_DISK_MOUNTS: /etc/monitor/repos + # PERIPHERY_INCLUDE_DISK_MOUNTS: /etc/komodo/repos # PERIPHERY_EXCLUDE_DISK_MOUNTS: /snap - monitor-mongo: + komodo-mongo: image: mongo command: --quiet # suppress mongo logs a bit restart: unless-stopped logging: driver: local networks: - - monitor-network + - komodo-network ports: - 27017:27017 volumes: - db-data:/data/db + - db-config:/data/configdb environment: MONGO_INITDB_ROOT_USERNAME: admin # change these MONGO_INITDB_ROOT_PASSWORD: admin volumes: db-data: - monitor-repos: + db-config: + komodo-repos: + komodo-stacks: networks: - monitor-network: {} \ No newline at end of file + komodo-network: {} \ No newline at end of file diff --git a/config_example/core.compose.yaml b/config_example/core.compose.yaml index 17a59a7a7..2abbe8aff 100644 --- a/config_example/core.compose.yaml +++ b/config_example/core.compose.yaml @@ -1,57 +1,59 @@ services: - monitor-core: - image: ghcr.io/mbecker20/monitor:latest ## use ghcr.io/mbecker20/monitor:latest-aarch64 for arm support + komodo-core: + image: ghcr.io/mbecker20/komodo:latest ## use ghcr.io/mbecker20/komodo:latest-aarch64 for arm support restart: unless-stopped depends_on: - - monitor-mongo + - komodo-mongo logging: driver: local # enable log rotation by default. see `https://docs.docker.com/config/containers/logging/local/` networks: - - monitor-network + - komodo-network ports: - 9120:9120 extra_hosts: # allows for local periphery connection at "http://host.docker.internal:8120" - host.docker.internal:host-gateway - environment: # https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml - MONITOR_HOST: https://demo.monitor.dev - MONITOR_TITLE: Monitor # Change the app title, displayed in the browser tab. + environment: # https://github.com/mbecker20/komodo/blob/main/config_example/core.config.example.toml + KOMODO_HOST: https://demo.komo.do + KOMODO_TITLE: Komodo # Change the app title, displayed in the browser tab. ## MONGO - MONITOR_MONGO_ADDRESS: monitor-mongo:27017 - MONITOR_MONGO_USERNAME: admin # match ones below - MONITOR_MONGO_PASSWORD: admin + KOMODO_MONGO_ADDRESS: komodo-mongo:27017 + KOMODO_MONGO_USERNAME: admin # match ones below + KOMODO_MONGO_PASSWORD: admin ## KEYS - MONITOR_PASSKEY: a_random_passkey # used to auth against periphery - MONITOR_WEBHOOK_SECRET: a_random_secret # used to authenticate incoming webhooks - MONITOR_JWT_SECRET: a_random_jwt_secret # Optional. If empty, will have to log in again on restart. + KOMODO_PASSKEY: a_random_passkey # used to auth against periphery + KOMODO_WEBHOOK_SECRET: a_random_secret # used to authenticate incoming webhooks + KOMODO_JWT_SECRET: a_random_jwt_secret # Optional. If empty, will have to log in again on restart. ## AUTH - MONITOR_LOCAL_AUTH: true # the default is false. - # MONITOR_GITHUB_OAUTH_ENABLED: true # also support google oauth - # MONITOR_GITHUB_OAUTH_ID: your_oauth_id - # MONITOR_GITHUB_OAUTH_SECRET: your_oauth_secret + KOMODO_LOCAL_AUTH: true # the default is false. 
+ # KOMODO_GITHUB_OAUTH_ENABLED: true # also support google oauth + # KOMODO_GITHUB_OAUTH_ID: your_oauth_id + # KOMODO_GITHUB_OAUTH_SECRET: your_oauth_secret ## AWS - # MONITOR_AWS_ACCESS_KEY_ID: your_aws_key_id - # MONITOR_AWS_SECRET_ACCESS_KEY: your_secret_access_key + # KOMODO_AWS_ACCESS_KEY_ID: your_aws_key_id + # KOMODO_AWS_SECRET_ACCESS_KEY: your_secret_access_key ## HETZNER - # MONITOR_HETZNER_TOKEN: your_hetzner_token + # KOMODO_HETZNER_TOKEN: your_hetzner_token - monitor-mongo: + komodo-mongo: image: mongo command: --quiet # suppress mongo logs a bit restart: unless-stopped logging: driver: local networks: - - monitor-network + - komodo-network ports: - 27017:27017 volumes: - db-data:/data/db + - db-config:/data/configdb environment: MONGO_INITDB_ROOT_USERNAME: admin # change these MONGO_INITDB_ROOT_PASSWORD: admin volumes: db-data: + db-config: networks: - monitor-network: {} \ No newline at end of file + komodo-network: {} \ No newline at end of file diff --git a/config_example/core.config.example.toml b/config_example/core.config.example.toml index 5b54fbe1e..8576b58cd 100644 --- a/config_example/core.config.example.toml +++ b/config_example/core.config.example.toml @@ -1,51 +1,51 @@ -####################### -# MONITOR CORE CONFIG # -####################### +###################### +# KOMODO CORE CONFIG # +###################### -## This is the offical "Default" config file for Monitor. +## This is the offical "Default" config file for Komodo Core. ## It serves as documentation for the meaning of the fields. -## It is located at [https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml). +## It is located at `https://github.com/mbecker20/komodo/blob/main/config_example/core.config.example.toml`. -## This file is bundled into the official image, `ghcr.io/mbecker20/monitor`, +## This file is bundled into the official image, `ghcr.io/mbecker20/komodo`, ## as the default config at `/config/config.toml`. -## Monitor can start with no external config file mounted. +## Komodo can start with no external config file mounted. ## There is usually no need to create this file on your host. ## Most fields can instead be configured using environment variables. ## This will be the document title on the web page (shows up as text in the browser tab). -## Env: MONITOR_TITLE -## Default: 'Monitor' -# title = "Monitor-02" +## Env: KOMODO_TITLE +## Default: 'Komodo' +# title = "Komodo-02" -## This should be the url used to access Monitor in browser, potentially behind DNS. -## Eg https://monitor.dev or http://12.34.56.78:9120. This should match the address configured in your Oauth app. -## Env: MONITOR_HOST -## Required to start Monitor, no default. -host = "https://monitor.dev" +## This should be the url used to access Komodo in browser, potentially behind DNS. +## Eg https://komodo.example.com or http://12.34.56.78:9120. This should match the address configured in your Oauth app. +## Env: KOMODO_HOST +## Required to start Komodo, no default. +host = "https://komodo.example.com" ## The port the core system will run on. -## Env: MONITOR_PORT +## Env: KOMODO_PORT ## Default: 9120 # port = 9121 ## This is the token used to authenticate core requests to periphery. ## Ensure this matches a passkey in the connected periphery configs. ## If the periphery servers don't have passkeys configured, this doesn't need to be changed. 
-## Env: MONITOR_PASSKEY -## Required to start Monitor, no default +## Env: KOMODO_PASSKEY +## Required to start Komodo, no default passkey = "a_random_passkey" ## Ensure a server with this address exists on Core ## upon first startup. Used with AIO compose. ## Optional, no default. -## Env: MONITOR_ENSURE_SERVER -# ensure_server = "http://monitor-periphery:8120" +## Env: KOMODO_ENSURE_SERVER +# ensure_server = "http://komodo-periphery:8120" ## Disables write support on resources in the UI. ## This protects users that that would normally have write priviledges during their UI usage, ## when they intend to fully rely on ResourceSyncs to manage config. -## Env: MONITOR_UI_WRITE_DISABLED +## Env: KOMODO_UI_WRITE_DISABLED ## Default: false # ui_write_disabled = true @@ -56,32 +56,32 @@ passkey = "a_random_passkey" ## Configure the database connection in one of the following ways: ## Pass a full Mongo URI. Suitable for Mongo Atlas. -## Env: MONITOR_MONGO_URI +## Env: KOMODO_MONGO_URI # mongo.uri = "mongodb://username:password@localhost:27017" ## ==== * OR * ==== ## # Construct the address as mongodb://{username}:{password}@{address} -## Env: MONITOR_MONGO_ADDRESS +## Env: KOMODO_MONGO_ADDRESS mongo.address = "localhost:27017" -## Env: MONITOR_MONGO_USERNAME +## Env: KOMODO_MONGO_USERNAME # mongo.username = "admin" -## Env: MONITOR_MONGO_PASSWORD +## Env: KOMODO_MONGO_PASSWORD # mongo.password = "admin" ## ==== other ==== -## Monitor will create its collections under this database name. -## The only reason to change this is if multiple Monitors share the same db. -## Env: MONITOR_MONGO_DB_NAME -## Default: monitor. -# mongo.db_name = "monitor" +## Komodo will create its collections under this database name. +## The only reason to change this is if multiple Komodo Cores share the same db. +## Env: KOMODO_MONGO_DB_NAME +## Default: komodo. +# mongo.db_name = "komodo" ## This is the assigned app_name of the mongo client. -## The only reason to change this is if multiple Monitors share the same db. -## Env: MONITOR_MONGO_APP_NAME -## Default: monitor_core. -# mongo.app_name = "monitor_core" +## The only reason to change this is if multiple Komodo Cores share the same db. +## Env: KOMODO_MONGO_APP_NAME +## Default: komodo_core. +# mongo.app_name = "komodo_core_01" ################ # AUTH / LOGIN # @@ -91,36 +91,36 @@ mongo.address = "localhost:27017" ## The password will be hashed and stored in the db for login comparison. ## ## NOTE: -## Monitor has no API to recover account logins, but if this happens you can doctor the db using Mongo Compass. +## Komodo has no API to recover account logins, but if this happens you can doctor the db using Mongo Compass. ## Create a new user, login to the database with Compass, note down your old users username and _id. ## Then delete the old user, and update the new user to have the same username and _id. ## Make sure to set `enabled: true` and maybe `admin: true` on the new user as well, while using Compass. ## -## Env: MONITOR_LOCAL_AUTH +## Env: KOMODO_LOCAL_AUTH ## Default: false # local_auth = true ## Allows all users to have Read level access to all resources. -## Env: MONITOR_TRANSPARENT_MODE +## Env: KOMODO_TRANSPARENT_MODE ## Default: false # transparent_mode = true ## New users will be automatically enabled when they sign up. ## Otherwise, new users will be disabled on first login. ## The first user to login will always be enabled on creation. 
-## Env: MONITOR_ENABLE_NEW_USERS +## Env: KOMODO_ENABLE_NEW_USERS ## Default: false # enable_new_users = true ## Optionally provide a specific jwt secret. ## Passing nothing or an empty string will cause one to be generated on every startup. -## This means users will have to log in again if Monitor restarts. -## Env: MONITOR_JWT_SECRET +## This means users will have to log in again if Komodo restarts. +## Env: KOMODO_JWT_SECRET # jwt_secret = "your_random_secret" ## Specify how long a user can stay logged in before they have to log in again. ## All jwts are invalidated on application restart unless `jwt_secret` is set. -## Env: MONITOR_JWT_TTL +## Env: KOMODO_JWT_TTL ## Default: 1-day. ## Options: 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk, 30-day # jwt_ttl = "3-day" @@ -131,29 +131,29 @@ mongo.address = "localhost:27017" ## Google -## Env: MONITOR_GOOGLE_OAUTH_ENABLED +## Env: KOMODO_GOOGLE_OAUTH_ENABLED ## Default: false # google_oauth.enabled = true -## Env: MONITOR_GOOGLE_OAUTH_ID +## Env: KOMODO_GOOGLE_OAUTH_ID ## Required if google_oauth is enabled. # google_oauth.id = "your_google_client_id" -## Env: MONITOR_GOOGLE_OAUTH_SECRET +## Env: KOMODO_GOOGLE_OAUTH_SECRET ## Required if google_oauth is enabled. # google_oauth.secret = "your_google_client_secret" ## Github -## Env: MONITOR_GITHUB_OAUTH_ENABLED +## Env: KOMODO_GITHUB_OAUTH_ENABLED ## Default: false # github_oauth.enabled = true -## Env: MONITOR_GITHUB_OAUTH_ID +## Env: KOMODO_GITHUB_OAUTH_ID ## Required if github_oauth is enabled. # github_oauth.id = "your_github_client_id" -## Env: MONITOR_GITHUB_OAUTH_SECRET +## Env: KOMODO_GITHUB_OAUTH_SECRET ## Required if github_oauth is enabled. # github_oauth.secret = "your_github_client_secret" @@ -163,25 +163,25 @@ mongo.address = "localhost:27017" ## This token must be given to git provider during repo webhook config. ## The secret configured on the git provider side must match the secret configured here. -## Env: MONITOR_WEBHOOK_SECRET +## Env: KOMODO_WEBHOOK_SECRET ## Default: empty (none) webhook_secret = "a_random_webhook_secret" ## An alternate base url that is used to recieve git webhook requests. ## If empty or not specified, will use 'host' address as base. -## This is useful if Monitor is on an internal network, but can have a +## This is useful if Komodo is on an internal network, but can have a ## proxy just allowing through the webhook api using NGINX. -## Env: MONITOR_WEBHOOK_BASE_URL +## Env: KOMODO_WEBHOOK_BASE_URL ## Default: empty (none) -# webhook_base_url = "https://git-webhook.monitor.dev" +# webhook_base_url = "https://git-webhook.komo.do" ## Configure Github webhook app. Enables webhook management apis. ## -## Env: MONITOR_GITHUB_WEBHOOK_APP_APP_ID +## Env: KOMODO_GITHUB_WEBHOOK_APP_APP_ID # github_webhook_app.app_id = 1234455 # Find on the app page. ## Env: -## - MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS -## - MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES +## - KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS +## - KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES # github_webhook_app.installations = [ # ## Find the id after installing the app to user / organization. "namespace" is the username / organization name. # { id = 1234, namespace = "mbecker20" } @@ -191,34 +191,34 @@ webhook_secret = "a_random_webhook_secret" ## This is defaulted to `/github/private-key.pem`, and doesn't need to be changed if running core in Docker. ## Just mount the private key pem file on the host to `/github/private-key.pem` in the container. ## Eg. 
`/your/path/to/key.pem : /github/private-key.pem` -## Env: MONITOR_GITHUB_WEBHOOK_APP_PK_PATH +## Env: KOMODO_GITHUB_WEBHOOK_APP_PK_PATH # github_webhook_app.pk_path = "/path/to/pk.pem" ########### # LOGGING # ########### -## Specify the log level of the monitor core application -## Env: MONITOR_LOGGING_LEVEL +## Specify the logging verbosity +## Env: KOMODO_LOGGING_LEVEL ## Options: off, error, warn, info, debug, trace ## Default: info # logging.level = "info" ## Specify the logging format for stdout / stderr. -## Env: MONITOR_LOGGING_STDIO +## Env: KOMODO_LOGGING_STDIO ## Options: standard, json, none ## Default: standard # logging.stdio = "standard" ## Optionally specify a opentelemetry otlp endpoint to send traces to. -## Env: MONITOR_LOGGING_OTLP_ENDPOINT +## Env: KOMODO_LOGGING_OTLP_ENDPOINT # logging.otlp_endpoint = "http://localhost:4317" ## Set the opentelemetry service name. -## This will be attached to the telemetry Monitor will send. -## Env: MONITOR_LOGGING_OPENTELEMETRY_SERVICE_NAME -## Default: "Monitor" -# logging.opentelemetry_service_name = "Monitor-02" +## This will be attached to the telemetry Komodo will send. +## Env: KOMODO_LOGGING_OPENTELEMETRY_SERVICE_NAME +## Default: "Komodo" +# logging.opentelemetry_service_name = "Komodo-01" ########### # PRUNING # @@ -226,13 +226,13 @@ webhook_secret = "a_random_webhook_secret" ## The number of days to keep historical system stats around, or 0 to disable pruning. ## Stats older that are than this number of days are deleted on a daily cycle. -## Env: MONITOR_KEEP_STATS_FOR_DAYS +## Env: KOMODO_KEEP_STATS_FOR_DAYS ## Default: 14 # keep_stats_for_days = 14 ## The number of days to keep alerts around, or 0 to disable pruning. ## Alerts older that are than this number of days are deleted on a daily cycle. -## Env: MONITOR_KEEP_ALERTS_FOR_DAYS +## Env: KOMODO_KEEP_ALERTS_FOR_DAYS ## Default: 14 # keep_alerts_for_days = 14 @@ -241,25 +241,25 @@ webhook_secret = "a_random_webhook_secret" ################## ## Interval at which to poll Stacks for any updates / automated actions. -## Env: MONITOR_STACK_POLL_INTERVAL +## Env: KOMODO_STACK_POLL_INTERVAL ## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`. ## Default: `5-min`. # stack_poll_interval = "1-min" ## Interval at which to poll Syncs for any updates / automated actions. -## Env: MONITOR_SYNC_POLL_INTERVAL +## Env: KOMODO_SYNC_POLL_INTERVAL ## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`. ## Default: `5-min`. # sync_poll_interval = "1-min" ## Interval at which to poll Builds (latest commit hash) for any updates / automated actions. -## Env: MONITOR_STACK_POLL_INTERVAL +## Env: KOMODO_STACK_POLL_INTERVAL ## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`. ## Default: `5-min`. # build_poll_interval = "1-min" ## Interval at which to poll Repos (latest commit hash) for any updates / automated actions. -## Env: MONITOR_REPO_POLL_INTERVAL +## Env: KOMODO_REPO_POLL_INTERVAL ## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`. ## Default: `5-min`. # repo_poll_interval = "1-min" @@ -274,22 +274,22 @@ webhook_secret = "a_random_webhook_secret" # CLOUD PROVIDERS # ################### -## Monitor can build images on purpose deployed AWS EC2 instances, +## Komodo can build images on purpose deployed AWS EC2 instances, ## and afterwards destroying the instance. -## Additionally, Monitor can deploy cloud VPS on AWS EC2 and Hetzner. +## Additionally, Komodo can deploy cloud VPS on AWS EC2 and Hetzner. ## Use the Template resource to configure launch preferences. 
## Hetzner is not supported for builds as their pricing model is by the hour, ## while AWS is by the minute. This is very important for builds. ## Provide aws api keys for ephemeral builders / server launch -## Env: MONITOR_AWS_ACCESS_KEY_ID +## Env: KOMODO_AWS_ACCESS_KEY_ID # aws.access_key_id = "your_aws_key_id" -## Env: MONITOR_AWS_SECRET_ACCESS_KEY +## Env: KOMODO_AWS_SECRET_ACCESS_KEY # aws.secret_access_key = "your_aws_secret_key" ## Provide hetzner api token for server launch -## Env: MONITOR_HETZNER_TOKEN +## Env: KOMODO_HETZNER_TOKEN # hetzner.token = "your_hetzner_token" ################# diff --git a/config_example/periphery.config.example.toml b/config_example/periphery.config.example.toml index f9f72549e..c22a54e03 100644 --- a/config_example/periphery.config.example.toml +++ b/config_example/periphery.config.example.toml @@ -1,22 +1,22 @@ -############################ -# MONITOR PERIPHERY CONFIG # -############################ +########################### +# KOMODO PERIPHERY CONFIG # +########################### ## Optional. The port the server runs on. 8120 is default ## Env: PERIPHERY_PORT # port = 8120 -## Optional. /etc/monitor/repos is default. +## Optional. /etc/komodo/repos is default. ## The directory periphery will use to manage repos. ## The periphery user must have write access to this directory. ## Env: PERIPHERY_REPO_DIR -# repo_dir = "/home/ubuntu/monitor/repos" +# repo_dir = "/home/ubuntu/komodo/repos" -## Optional. /etc/monitor/stacks is default. +## Optional. /etc/komodo/stacks is default. ## The directory periphery will use to manage stacks. ## The periphery user must have write access to this directory. ## Env: PERIPHERY_STACK_DIR -# stack_dir = "/home/ubuntu/monitor/stacks" +# stack_dir = "/home/ubuntu/komodo/stacks" ## Optional. 5-sec is default. can use 1-sec, 5-sec, 10-sec, 30-sec, 1-min. controls granularity of system stats recorded ## Env: PERIPHERY_STATS_POLLING_RATE @@ -30,11 +30,11 @@ ## Optional. Only include mounts at specific paths in the disc report. ## Env: PERIPHERY_INCLUDE_DISK_MOUNTS -# include_disk_mounts = ["/etc/monitor/repos"] +# include_disk_mounts = ["/etc/komodo/repos"] ## Optional. Don't include these mounts in the disk report. ## Env: PERIPHERY_EXCLUDE_DISK_MOUNTS -# exclude_disk_mounts = ["/etc/monitor/repos"] +# exclude_disk_mounts = ["/etc/komodo/repos"] ######## # AUTH # @@ -109,7 +109,7 @@ # LOGGING # ########### -## Specify the log level of the monitor core application +## Specify the logging verbosity ## Default: info ## Options: off, error, warn, info, debug, trace ## Env: PERIPHERY_LOGGING_LEVEL @@ -127,6 +127,6 @@ # logging.otlp_endpoint = "http://localhost:4317" ## Set the opentelemetry service name attached to the telemetry this periphery will send. -## Default: "Monitor" +## Default: "Komodo" ## Env: PERIPHERY_LOGGING_OPENTELEMETRY_SERVICE_NAME # logging.opentelemetry_service_name = "Periphery-02" \ No newline at end of file diff --git a/docsite/docs/api.md b/docsite/docs/api.md index 51271afd4..53f1fe6f1 100644 --- a/docsite/docs/api.md +++ b/docsite/docs/api.md @@ -1,6 +1,6 @@ # API -Monitor Core exposes an http API to read data, write configuration, and execute actions. The API documentation is generated from the code and is [available here](https://docs.rs/monitor_client/latest/monitor_client/api/index.html). +Komodo Core exposes an http API to read data, write configuration, and execute actions. 
The API documentation is generated from the code and is [available here](https://docs.rs/komodo_client/latest/komodo_client/api/index.html). -You can also install the [Monitor CLI](https://crates.io/crates/monitor_cli) to execute actions like RunBuild or DeployStack from the command line. -This can be coupled with scripts in Monitor Repos to achieve unlimited automation. \ No newline at end of file +You can also install the [Komodo CLI](https://crates.io/crates/komodo_cli) to execute actions like RunBuild or DeployStack from the command line. +This can be coupled with scripts in Komodo Repos to achieve unlimited automation. \ No newline at end of file diff --git a/docsite/docs/build-images/builders.md b/docsite/docs/build-images/builders.md index 61d5ec19d..85ad44ce1 100644 --- a/docsite/docs/build-images/builders.md +++ b/docsite/docs/build-images/builders.md @@ -1,13 +1,13 @@ # Builders -A builder is a machine running monitor periphery and docker, which is able to handle a RunBuild command from monitor core. Any server connected to monitor can be chosen as the builder for a build. +A builder is a machine running the Komodo Periphery agent (and usually docker), which is able to handle a RunBuild / BuildRepo command from Komodo core. Any server connected to Komodo can be chosen as the builder for a build. -Building on a machine running production software is usually not a great idea, as this process can use a lot of system resources. It is better to start up a temporary cloud machine dedicated for the build, then shut it down when the build is finished. Monitor supports AWS EC2 for this task. +Building on a machine running production software is usually not a great idea, as this process can use a lot of system resources. It is better to start up a temporary cloud machine dedicated for the build, then shut it down when the build is finished. Komodo supports AWS EC2 for this task. ## AWS builder -Builders are now monitor resources, and are managed via the core API / can be updated using the UI. -To use this feature, you need an AWS EC2 AMI with docker and monitor periphery configured to run on system start. +Builders are now Komodo resources, and are managed via the core API / can be updated using the UI. +To use this feature, you need an AWS EC2 AMI with docker and Komodo Periphery configured to run on system start. Once you create your builder and add the necessary configuration, it will be available to attach to builds. ### Setup the instance @@ -22,7 +22,7 @@ apt upgrade -y curl -fsSL https://get.docker.com | sh systemctl enable docker.service systemctl enable containerd.service -curl -sSL https://raw.githubusercontent.com/mbecker20/monitor/main/scripts/setup-periphery.py | python3 +curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py | python3 systemctl enable periphery.service ``` @@ -46,4 +46,4 @@ Once Periphery is running, you can navigate to the instance in the AWS UI and ch The AMI will provide a unique id starting with `ami-`, use this with the builder configuration. ### Configure security groups / firewall -The builders will need inbound access on port 8120 from monitor core, be sure to add a security group with this rule to the Builder configuration. +The builders will need inbound access on port 8120 from Komodo Core, be sure to add a security group with this rule to the Builder configuration. 
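
The patch above renames the deployment-level executions (StartDeployment, StopDeployment, DestroyDeployment, ...) and adds server-level equivalents that target a raw container by name (StartContainer, StopContainer, DestroyContainer, the *AllContainers requests, and the Delete*/Prune*/PruneSystem requests). Below is a minimal sketch of how a client might drive one of these over the HTTP API described in `docsite/docs/api.md`. The `{ type, params }` envelope follows the ExecuteRequest union in this patch; the `/execute` path and the auth header names are assumptions for illustration, not part of the patch.

```ts
// A minimal sketch (not part of the patch): sending one of the new
// server-level container executions to Komodo Core over HTTP.
// The `/execute` path and header names are assumptions for illustration.

type StartContainer = { server: string; container: string };
type ExecuteRequest = { type: "StartContainer"; params: StartContainer };

async function execute(
  base: string,      // e.g. "https://komodo.example.com" (hypothetical host)
  apiKey: string,
  apiSecret: string,
  request: ExecuteRequest,
): Promise<unknown> {
  const res = await fetch(`${base}/execute`, {
    method: "POST",
    headers: {
      "content-type": "application/json",
      // Header names are assumptions for illustration.
      "x-api-key": apiKey,
      "x-api-secret": apiSecret,
    },
    body: JSON.stringify(request),
  });
  if (!res.ok) throw new Error(`execute failed: ${res.status}`);
  return res.json(); // resolves to an Update describing the action
}

// Usage: start a container directly on a server, without a Deployment.
// await execute(BASE, KEY, SECRET, {
//   type: "StartContainer",
//   params: { server: "my-server", container: "my-container" },
// });
```

The same envelope applies to every other variant in the ExecuteRequest union, for example `{ type: "PruneSystem", params: { server: "my-server" } }`.
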
diff --git a/docsite/docs/build-images/configuration.md b/docsite/docs/build-images/configuration.md index 8bae86a15..ab99ed186 100644 --- a/docsite/docs/build-images/configuration.md +++ b/docsite/docs/build-images/configuration.md @@ -1,15 +1,15 @@ # Configuration -Monitor just needs a bit of information in order to build your image. +Komodo just needs a bit of information in order to build your image. ### Provider configuration -Monitor supports cloning repos over http/s, from any provider that supports cloning private repos using `git clone https://@git-provider.net//`. +Komodo supports cloning repos over http/s, from any provider that supports cloning private repos using `git clone https://@git-provider.net//`. Accounts / access tokens can be configured in either the [core config](../core-setup.md#configuration) or in the [periphery config](../connecting-servers.md#manual-install-steps). ### Repo configuration -To specify the git repo to build, just give it the name of the repo and the branch under *repo config*. The name is given like ```mbecker20/monitor```, it includes the username / organization that owns the repo. +To specify the git repo to build, just give it the name of the repo and the branch under *repo config*. The name is given like `mbecker20/komodo`, it includes the username / organization that owns the repo. Many repos are private, in this case an access token is needed by the building server. It can either come from a provider defined in the core configuration, @@ -17,7 +17,7 @@ or in the periphery configuration of the building server. ### Docker build configuration -In order to docker build, monitor just needs to know the build directory and the path of the Dockerfile relative to the repo, you can configure these in the *build config* section. +In order to docker build, Komodo just needs to know the build directory and the path of the Dockerfile relative to the repo, you can configure these in the *build config* section. If the build directory is the root of the repository, you pass the build path as `.`. If the build directory is some folder of the repo, just pass the name of the the folder. Do not pass the preceding "/". for example `build/directory` @@ -25,7 +25,7 @@ The dockerfile's path is given relative to the build directory. So if your build ### Image registry -Monitor supports pushing to any docker registry. +Komodo supports pushing to any docker registry. Any of the accounts that are specified in config for the specific registry, between the core config and builder, will be available to use for authentication against the registry. Additionally, allowed organizations on the docker registry can be specified on the core config and attached to builds. Doing so will cause the images to be published under the organization's namespace rather than the account's. diff --git a/docsite/docs/build-images/index.mdx b/docsite/docs/build-images/index.mdx index 6bac601bb..2d5f3917f 100644 --- a/docsite/docs/build-images/index.mdx +++ b/docsite/docs/build-images/index.mdx @@ -4,7 +4,7 @@ slug: /build-images # Building Images -Monitor builds docker images by cloning the source repository from the configured git provider, running `docker build`, +Komodo builds docker images by cloning the source repository from the configured git provider, running `docker build`, and pushing the resulting image to the configured docker registry. Any repo containing a `Dockerfile` is buildable using this method. 
```mdx-code-block diff --git a/docsite/docs/build-images/versioning.md b/docsite/docs/build-images/versioning.md index 98b6dde90..b7e8d35f5 100644 --- a/docsite/docs/build-images/versioning.md +++ b/docsite/docs/build-images/versioning.md @@ -1,3 +1,5 @@ -# Versioning +# Image Versioning -Monitor uses a major.minor.patch versioning scheme. Every build will auto increment the patch number, and push the image to docker hub with the version tag as well as the ```latest``` tag. \ No newline at end of file +Komodo uses a major.minor.patch versioning scheme for Build versioning. By default, every RunBuild will auto increment the Build's version patch number, and push the image to docker hub with the version tag, as well as the `latest` tag. A tag containing the latest short commit hash at the time the repo was cloned will also be created. + +You can also turn off the auto incrementing feature, and manage the version yourself. In addition, you can configure a "version tag" on the build. This will postfix the version tag / commit hash tag with a custom label. For example, a version tag of `dev` will produce tags like `image_name:1.1.1-dev` and `image_name:h3c87c-dev`. \ No newline at end of file diff --git a/docsite/docs/connecting-servers.md b/docsite/docs/connecting-servers.md index 216050c2b..d6fac7ad2 100644 --- a/docsite/docs/connecting-servers.md +++ b/docsite/docs/connecting-servers.md @@ -1,37 +1,41 @@ # Connecting Servers -Connecting a server to monitor has 2 steps: +Connecting a server to Komodo has 2 steps: 1. Install the Periphery agent on the server -2. Adding the server to monitor via the core API +2. Adding the server to Komodo via the core API -Once step 1. is complete, you can just connect the server to Monitor Core from the UI. +Once step 1. is complete, you can just connect the server to Komodo Core from the UI. ## Install -You can install Periphery as a systemd managed process, run it as a [docker container](https://github.com/mbecker20/monitor/pkgs/container/periphery), or do whatever you want with the binary. +You can install Periphery as a systemd managed process, run it as a [docker container](https://github.com/mbecker20/komodo/pkgs/container/periphery), or do whatever you want with the binary. Some Periphery actions interact with your host's file system, like cloning repos, or accessing local compose files. For this reason, running periphery in a container can be a bit more complicated. Additionally, Periphery in a container tends to overreport the disks by default, but this can be fixed via some configuration. +:::warning +Allowing unintended access to the Periphery agent API is a security risk. Be sure to take appropriate measures to block access to the Periphery API, such as firewall rules on port `8120`. Additionally, you can whitelist your Komodo Core IP address in the [Periphery config](https://github.com/mbecker20/komodo/blob/2463ed3879ee56821f99d1f09581d659ee5d0575/config_example/periphery.config.example.toml#L46), and configure it to [only accept requests including your Core passkey](https://github.com/mbecker20/komodo/blob/2463ed3879ee56821f99d1f09581d659ee5d0575/config_example/periphery.config.example.toml#L51).
+::: + ### Install the Periphery agent - systemd As root user: ```sh -curl -sSL https://raw.githubusercontent.com/mbecker20/monitor/main/scripts/setup-periphery.py | python3 +curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py | python3 ``` Periphery can also be installed to run as the calling user, just note this comes with some additional configuration. ```sh -curl -sSL https://raw.githubusercontent.com/mbecker20/monitor/main/scripts/setup-periphery.py | python3 - --user +curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py | python3 - --user ``` -You can find more information (and view the script) in the [readme](https://github.com/mbecker20/monitor/tree/main/scripts). +You can find more information (and view the script) in the [readme](https://github.com/mbecker20/komodo/tree/main/scripts). :::info -This script can be run multiple times without issue, and it won't change existing config after the first run. Just run it again after a Monitor version release, and it will update the periphery version. +This script can be run multiple times without issue, and it won't change existing config after the first run. Just run it again after a Komodo version release, and it will update the periphery version. ::: ### Install the Periphery agent - container @@ -39,7 +43,7 @@ This script can be run multiple times without issue, and it won't change existin You can use a docker compose file like this: ```yaml services: - monitor-periphery: + komodo-periphery: image: ghcr.io/mbecker20/periphery:latest # use ghcr.io/mbecker20/periphery:latest-aarch64 for arm support logging: driver: local @@ -47,22 +51,27 @@ services: - 8120:8120 volumes: - /var/run/docker.sock:/var/run/docker.sock - - monitor-repos:/etc/monitor/repos # manage repos in a docker volume, or change it to an accessible host directory. + - komodo-repos:/etc/komodo/repos # manage repos in a docker volume, or change it to an accessible host directory. + - komodo-stacks:/etc/komodo/stacks # manage stacks in a docker volume, or change it to an accessible host directory. # environment: # # If the disk size is overreporting, can use one of these to # # whitelist / blacklist the disks to filter them, whichever is easier. - # PERIPHERY_INCLUDE_DISK_MOUNTS: /etc/monitor/repos + # PERIPHERY_INCLUDE_DISK_MOUNTS: /etc/komodo/repos # PERIPHERY_EXCLUDE_DISK_MOUNTS: /snap + +volumes: + komodo-repos: + komodo-stacks: ``` ### Manual install steps - binaries -1. Download the periphery binary from the latest [release](https://github.com/mbecker20/monitor/releases). +1. Download the periphery binary from the latest [release](https://github.com/mbecker20/komodo/releases). -2. Create and edit your config files, following the [config example](https://github.com/mbecker20/monitor/blob/main/config_example/periphery.config.example.toml). +2. Create and edit your config files, following the [config example](https://github.com/mbecker20/komodo/blob/main/config_example/periphery.config.example.toml). :::note -See the [periphery config docs](https://docs.rs/monitor_client/latest/monitor_client/entities/config/periphery/index.html) +See the [periphery config docs](https://docs.rs/komodo_client/latest/komodo_client/entities/config/periphery/index.html) for more information on configuring periphery. 
::: diff --git a/docsite/docs/core-setup.md b/docsite/docs/core-setup.md index d15dd4c09..3211ec190 100644 --- a/docsite/docs/core-setup.md +++ b/docsite/docs/core-setup.md @@ -1,21 +1,21 @@ -# Monitor Core Setup +# Komodo Core Setup -To run Monitor Core, you will need Docker. See [the docker install docs](https://docs.docker.com/engine/install/). +To run Komodo Core, you will need Docker. See [the docker install docs](https://docs.docker.com/engine/install/). -### Deploy Monitor Core with Docker Compose +### Deploy Komodo Core with Docker Compose -There is an example compose file here: [https://github.com/mbecker20/monitor/blob/main/config_example/core.compose.yaml](https://github.com/mbecker20/monitor/blob/main/config_example/core.compose.yaml). +There is an example compose file here: [https://github.com/mbecker20/komodo/blob/main/config_example/core.compose.yaml](https://github.com/mbecker20/komodo/blob/main/config_example/core.compose.yaml). Copy the contents to a `compose.yaml`, and deploy it with `docker compose up -d`. :::info -Monitor Core itself can really only run remote builds. -You also have to [**install the Monitor Periphery agent**](/docs/connecting-servers) on your hosts and connect them as **Servers** +Komodo Core itself can really only run remote builds. +You also have to [**install the Komodo Periphery agent**](/docs/connecting-servers) on your hosts and connect them as **Servers** in order to alert / deploy etc. -If you **only need to connect on one server** (the one you are deploying Monitor Core on), you can do it all dockerized, -and use the [**all-in-one compose file**](https://github.com/mbecker20/monitor/blob/main/config_example/aio.compose.yaml). -This will deploy Monitor Core and Periphery, and automatically add the local periphery as a connected server. +If you **only need to connect on one server** (the one you are deploying Komodo Core on), you can do it all dockerized, +and use the [**all-in-one compose file**](https://github.com/mbecker20/komodo/blob/main/config_example/aio.compose.yaml). +This will deploy Komodo Core and Periphery, and automatically add the local periphery as a connected server. Deploying with the AIO compose file **will not** stop you from connecting more servers later, and is really just for setup convenience. @@ -24,10 +24,10 @@ You can currently and always will be able to **connect as many servers an you li ### Configuration -You can configure Monitor with environment variables, or using a config file. +You can configure Komodo with environment variables, or using a config file. -The example config file in the Monitor repo documents all the configuration options, along with the corresponding environment variables. -It can be found here: [https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml). +The example config file in the Komodo repo documents all the configuration options, along with the corresponding environment variables. +It can be found here: [https://github.com/mbecker20/komodo/blob/main/config_example/core.config.example.toml](https://github.com/mbecker20/komodo/blob/main/config_example/core.config.example.toml). Note that configuration passed in environment variables will take precedent over what is given in the file. 
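To make the precedence rule above concrete, here is a hedged sketch that mounts the config file while overriding a single option through the environment. `KOMODO_WEBHOOK_SECRET` is one of the variables documented elsewhere in these docs (see the webhooks page); the secret value below is a placeholder.

```sh
# The mounted core.config.toml supplies the baseline configuration;
# the environment variable overrides the corresponding value from the file.
docker run -d --name komodo-core \
  --network host \
  -v $HOME/.config/komodo/core.config.toml:/config/config.toml \
  -e KOMODO_WEBHOOK_SECRET="a-placeholder-secret" \
  ghcr.io/mbecker20/komodo:latest
```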
@@ -35,7 +35,7 @@ Note that configuration passed in environment variables will take precedent over To enable OAuth2 login, you must create a client on the respective OAuth provider, for example [google](https://developers.google.com/identity/protocols/oauth2) or [github](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps). -Monitor uses the `web application` login flow. +Komodo uses the `web application` login flow. The redirect uri is `/auth/google/callback` for google and `/auth/github/callback` for github. ::: @@ -56,7 +56,7 @@ Core itself only supports http, so a reverse proxy like [caddy](https://caddyser Mongo can be run locally using the docker cli: ```sh -docker run --name monitor-mongo \ +docker run --name komodo-mongo \ --network host \ -v /local/storage/path:/data/db \ -e MONGO_INITDB_ROOT_USERNAME="admin" \ @@ -71,20 +71,20 @@ Note that this uses "host" networking, which will allow core to connect over loc Many users will prefer the default "bridge" network, and to use port mapping with `-p 27017:27017`. :::note -The disk space requirements of Monitor are dominated by the storage of system stats. +The disk space requirements of Komodo are dominated by the storage of system stats. This depends on the number of connected servers (more system stats being produces / stored), stats collection frequency, and your stats pruning configuration. If you need to save on space, you can configure these fields in your core config: - Stats poll frequency can be reduced using, for example, `monitoring_interval = "15-sec"` - Pruning can be tuned more aggresively using, for example, `keep_stats_for_days = 7`. ::: -### 2. Start Monitor core +### 2. Start Komodo core -Monitor core is distributed via Github Container Registry under the package [mbecker20/monitor](https://github.com/mbecker20/monitor/pkgs/container/monitor). +Komodo core is distributed via Github Container Registry under the package [mbecker20/komodo](https://github.com/mbecker20/komodo/pkgs/container/komodo). ```sh -docker run -d --name monitor-core \ +docker run -d --name komodo-core \ --network host \ - -v $HOME/.monitor/core.config.toml:/config/config.toml \ - ghcr.io/mbecker20/monitor:latest + -v $HOME/.config/komodo/core.config.toml:/config/config.toml \ + ghcr.io/mbecker20/komodo:latest ``` Note that this uses "host" networking, which will allow it to connect to a local periphery agent on localhost. diff --git a/docsite/docs/deploy-containers/configuration.md b/docsite/docs/deploy-containers/configuration.md index 04d0a5133..e5125e396 100644 --- a/docsite/docs/deploy-containers/configuration.md +++ b/docsite/docs/deploy-containers/configuration.md @@ -4,12 +4,12 @@ There are two options to configure the docker image to deploy. -### Attaching a Monitor build -If the software you want to deploy is built by Monitor, you can attach the build directly to the deployment. +### Attaching a Komodo build +If the software you want to deploy is built by Komodo, you can attach the build directly to the deployment. -By default, Monitor will deploy the latest available version of the build, or you can specify a specific version using the version dropdown. +By default, Komodo will deploy the latest available version of the build, or you can specify a specific version using the version dropdown. -Also by default, Monitor will use the same docker account that is attached to the build in order to pull the image on the periphery server. 
If that account is not available on the server, you can specify another available account to use instead, this account just needs to have read access to the docker repository. +Also by default, Komodo will use the same docker account that is attached to the build in order to pull the image on the periphery server. If that account is not available on the server, you can specify another available account to use instead, this account just needs to have read access to the docker repository. ### Using a custom image You can also manually specify an image name, like `mongo` or `ghcr.io/mbecker20/random_image:0.1.1`. @@ -18,7 +18,7 @@ If the image repository is private, you can still select an available docker acc ## Configuring the network -One feature of docker is that it allows for the creation of [virtual networks between containers](https://docs.docker.com/network/). Monitor allows you to specify a docker virtual network to connect the container to, or to use the host system networking to bypass the docker virtual network. +One feature of docker is that it allows for the creation of [virtual networks between containers](https://docs.docker.com/network/). Komodo allows you to specify a docker virtual network to connect the container to, or to use the host system networking to bypass the docker virtual network. The default selection is `host`, which bypasses the docker virtual network layer. @@ -34,11 +34,11 @@ Note that this is not the only affect of using a network other than `host`. For ## Configuring restart behavior -Docker, like systemd, has a couple options for handling when a container exits. See [docker restart policies](https://docs.docker.com/config/containers/start-containers-automatically/). Monitor allows you to select the appropriate restart behavior from these options. +Docker, like systemd, has a couple options for handling when a container exits. See [docker restart policies](https://docs.docker.com/config/containers/start-containers-automatically/). Komodo allows you to select the appropriate restart behavior from these options. ## Configuring environment variables -Monitor enables you to easily manage environment variables passed to the container. +Komodo enables you to easily manage environment variables passed to the container. In the GUI, navigate to the environment tab of the configuration on the deployment page. You pass environment variables just as you would with a ```.env``` file: @@ -64,7 +64,7 @@ These can be configured easily with the GUI in the 'volumes' card. You can confi ## Extra args -Not all features of docker are mapped directly by monitor, only the most common. You can still specify any custom flags for monitor to include in the ```docker run``` command by utilizing 'extra args'. For example, you can enable log rotation using these two extra args: +Not all features of docker are mapped directly by Komodo, only the most common. You can still specify any custom flags for Komodo to include in the `docker run` command by utilizing 'extra args'. For example, you can enable log rotation using these two extra args: ``` --log-opt max-size=10M @@ -81,4 +81,4 @@ Sometimes you need to override the default command in the image, or specify some docker run -d --name mongo-db mongo:6.0.3 --quiet ``` -In order to achieve this with monitor, just pass `--quiet` to 'command'. \ No newline at end of file +In order to achieve this with Komodo, just pass `--quiet` to 'command'. 
\ No newline at end of file diff --git a/docsite/docs/deploy-containers/index.mdx b/docsite/docs/deploy-containers/index.mdx index f3fa3d522..86dac573c 100644 --- a/docsite/docs/deploy-containers/index.mdx +++ b/docsite/docs/deploy-containers/index.mdx @@ -1,6 +1,6 @@ # Deploy Containers -Monitor can deploy any docker images that it can access with the configured docker accounts. +Komodo can deploy any docker images that it can access with the configured docker accounts. It works by parsing the deployment configuration into a `docker run` command, which is then run on the target system. The configuration is stored on MongoDB, and records of all actions (update config, deploy, stop, etc.) are stored as well. diff --git a/docsite/docs/deploy-containers/lifetime-management.md b/docsite/docs/deploy-containers/lifetime-management.md index 4d6595934..ae1605245 100644 --- a/docsite/docs/deploy-containers/lifetime-management.md +++ b/docsite/docs/deploy-containers/lifetime-management.md @@ -1,6 +1,6 @@ # Container Management -The lifetime of a docker container is more like a virtual machine. They can be created, started, stopped, and destroyed. Monitor will display the state of the container and provides an API to manage all your container's lifetimes. +The lifetime of a docker container is more like a virtual machine. They can be created, started, stopped, and destroyed. Komodo will display the state of the container and provides an API to manage all your container's lifetimes. This is achieved internally by running the appropriate docker command for the requested action (docker stop, docker start, etc). diff --git a/docsite/docs/docker-compose.md b/docsite/docs/docker-compose.md index 6754910cd..1763a7150 100644 --- a/docsite/docs/docker-compose.md +++ b/docsite/docs/docker-compose.md @@ -1,13 +1,13 @@ # Docker Compose -Monitor supports docker compose through the `Stack` resource. +Komodo supports docker compose through the `Stack` resource. ## Define the compose file/s -Monitor supports 3 ways of defining the compose files: - 1. **Write them in the UI**, and Monitor will write them to your host at deploy-time. - 2. **Store them in a git repo**, and have Monitor clone it on the host to deploy. - 3. **Store the files anywhere on the host**, and Monitor will just run the compose commands on the existing files. +Komodo supports 3 ways of defining the compose files: + 1. **Write them in the UI**, and Komodo will write them to your host at deploy-time. + 2. **Store them in a git repo**, and have Komodo clone it on the host to deploy. + 3. **Store the files anywhere on the host**, and Komodo will just run the compose commands on the existing files. The recommended way to deploy Stacks is using compose files located in a git repo. @@ -18,24 +18,24 @@ If you manage your compose files in git repos: - You can use the git webhooks to do other automations when you change the compose file contents. Redeploying will be as easy as just `git push`. :::info -Many Monitor resources need access to git repos. There is an in-built token management system (managed in UI or in config file) to give resources access to credentials. +Many Komodo resources need access to git repos. There is an in-built token management system (managed in UI or in config file) to give resources access to credentials. All resources which depend on git repos are able to use these credentials to access private repos. 
::: ## Importing Existing Compose projects -First create the Stack in Monitor, and ensure it has access to the compose files using one +First create the Stack in Komodo, and ensure it has access to the compose files using one of the three methods above. Make sure to attach the server you wish to deploy on. -In order for Monitor to pick up a running project, it has to know the compose "project name". +In order for Komodo to pick up a running project, it has to know the compose "project name". You can find the project name by running `docker compose ls` on the host. -By default, Monitor will assume the Stack name is the compose project name. +By default, Komodo will assume the Stack name is the compose project name. If this is different than the project name on the host, you can configure a custom "Project Name" in the config. ## Pass Environment Variables -Monitor is able to pass custom environment variables to the docker compose process. +Komodo is able to pass custom environment variables to the docker compose process. This works by: 1. Write the variables to a ".env" file on the host at deploy-time. diff --git a/docsite/docs/file-paths.md b/docsite/docs/file-paths.md index 71545ef29..78a06c95b 100644 --- a/docsite/docs/file-paths.md +++ b/docsite/docs/file-paths.md @@ -1,6 +1,6 @@ # File Paths -When working with monitor, you might have to configure file or directory paths. +When working with Komodo, you might have to configure file or directory paths. ## Relative Paths diff --git a/docsite/docs/intro.md b/docsite/docs/intro.md index b59284994..d58913c9d 100644 --- a/docsite/docs/intro.md +++ b/docsite/docs/intro.md @@ -2,11 +2,11 @@ slug: /intro --- -# What is Monitor? +# What is Komodo? -Monitor is a web app to provide structure for managing your servers, builds, deployments, and automated procedures. +Komodo is a web app to provide structure for managing your servers, builds, deployments, and automated procedures. -With Monitor you can: +With Komodo you can: - Connect all of your servers, and alert on CPU usage, memory usage, and disk usage. - Create, start, stop, and restart Docker containers on the connected servers, and view their status and logs. @@ -18,29 +18,29 @@ With Monitor you can: ## Docker -Monitor is opinionated by design, and uses [docker](https://docs.docker.com/) as the container engine for building and deploying. +Komodo is opinionated by design, and uses [docker](https://docs.docker.com/) as the container engine for building and deploying. :::info -Monitor also supports [**podman**](https://podman.io/) instead of docker by utilizing the `podman` -> `docker` alias. +Komodo also supports [**podman**](https://podman.io/) instead of docker by utilizing the `podman` -> `docker` alias. For Stack / docker compose support with podman, check out [**podman-compose**](https://github.com/containers/podman-compose). Thanks to `u/pup_kit` for checking this. ::: ## Architecture and Components -Monitor is composed of a single core and any amount of connected servers running the periphery application. +Komodo is composed of a single core and any amount of connected servers running the periphery application. ### Core -Monitor Core is a web server hosting the Core API and browser UI. All user interaction with the connected servers flow through the Core. It is the stateful part of the system, with the application state stored on an instance of MongoDB. +Komodo Core is a web server hosting the Core API and browser UI. All user interaction with the connected servers flow through the Core. 
It is the stateful part of the system, with the application state stored on an instance of MongoDB. ### Periphery -Monitor Periphery is a small stateless web server that runs on all connected servers. It exposes an API called by Monitor Core to perform actions on the server, get system usage, and container status / logs. It is only intended to be reached from the core, and has an address whitelist to limit the IPs allowed to call this API. +Komodo Periphery is a small stateless web server that runs on all connected servers. It exposes an API called by Komodo Core to perform actions on the server, get system usage, and container status / logs. It is only intended to be reached from the core, and has an address whitelist to limit the IPs allowed to call this API. ## Core API -Monitor exposes powerful functionality over the Core's REST and Websocket API, enabling infrastructure engineers to manage their infrastructure programmatically. There is a [rust crate](https://crates.io/crates/monitor_client) to simplify programmatic interaction with the API, but in general this can be accomplished using any programming language that can make REST requests. +Komodo exposes powerful functionality over the Core's REST and Websocket API, enabling infrastructure engineers to manage their infrastructure programmatically. There is a [rust crate](https://crates.io/crates/komodo_client) to simplify programmatic interaction with the API, but in general this can be accomplished using any programming language that can make REST requests. ## Permissioning -Monitor is a system designed to be used by many users, whether they are developers, operations personnel, or administrators. The ability to affect an applications state is very powerful, so monitor has a granular permissioning system to only provide this functionality to the intended users. The permissioning system is explained in detail in the [permissioning](/docs/permissioning) section. +Komodo is a system designed to be used by many users, whether they are developers, operations personnel, or administrators. The ability to affect an application's state is very powerful, so Komodo has a granular permissioning system to only provide this functionality to the intended users. The permissioning system is explained in detail in the [permissioning](/docs/permissioning) section. User sign-on is possible using username / password, or with Oauth (Github and Google). See [Core Setup](/docs/core-setup). \ No newline at end of file diff --git a/docsite/docs/permissioning.md b/docsite/docs/permissioning.md index a9f4ac077..886d86932 100644 --- a/docsite/docs/permissioning.md +++ b/docsite/docs/permissioning.md @@ -1,6 +1,6 @@ # Permissioning Resources -All monitor resources (servers, builds, deployment) have independant permission tables to allow for users to have granular access to these resources. By default, users do not see any resources until they are given at least read permissions. +All Komodo resources (servers, builds, deployments) have independent permission tables to allow for users to have granular access to these resources. By default, users do not see any resources until they are given at least read permissions. ## Permission Levels @@ -21,13 +21,13 @@ Users can then be **added to multiple User Groups** and they **inherit the group ## Administration -Users can be given admin priviledges by accessing the monitor MongoDB and setting ```admin: true``` on the intended user document.
These users have unrestricted access to all monitor resources, like servers, builds, and deployments. Additionally, only these users can update other (non-admin) user's permissions on resources, an action not available to regular users even with **Update** level permissions. +Users can be given admin privileges by accessing the Komodo MongoDB and setting ```admin: true``` on the intended user document. These users have unrestricted access to all Komodo resources, like servers, builds, and deployments. Additionally, only these users can update other (non-admin) users' permissions on resources, an action not available to regular users even with **Update** level permissions. -Monitor admins are responsible for managing user accounts as well. When a user logs into monitor for the first time, they will not immediately be granted access. An admin must first **enable** the user, which can be done from the 'manage users' page (found in the user dropdown menu in the topbar). Users can also be **disabled** by an admin at any time, which blocks all their access to the GUI and API. +Komodo admins are responsible for managing user accounts as well. When a user logs into Komodo for the first time, they will not immediately be granted access. An admin must first **enable** the user, which can be done from the 'manage users' page (found in the user dropdown menu in the topbar). Users can also be **disabled** by an admin at any time, which blocks all their access to the GUI and API. Users also have some configurable global permissions, these are: - create server permission - create build permission -Only users with these permissions (as well as admins) can add additional servers to monitor, and can create additional builds, respectively. \ No newline at end of file +Only users with these permissions (as well as admins) can add additional servers to Komodo, and can create additional builds, respectively. \ No newline at end of file diff --git a/docsite/docs/resources.md b/docsite/docs/resources.md index fef9912d8..48af0e5ff 100644 --- a/docsite/docs/resources.md +++ b/docsite/docs/resources.md @@ -1,6 +1,6 @@ # Resources -Monitor is extendible through the **Resource** abstraction. Entities like `Server`, `Deployment`, and `Stack` are all **Monitor Resources**. +Komodo is extensible through the **Resource** abstraction. Entities like `Server`, `Deployment`, and `Stack` are all **Komodo Resources**. All resources have common traits, such as a unique `name` and `id` amongst all other resources of the same resource type. All resources can be assigned `tags`, which can be used to group related resources. @@ -62,4 +62,4 @@ All resources which depend on git repos / docker registries are able to use thes ## ServerTemplate -- Easily expand your cloud network by storing cloud server lauch templates on various providers.

--- Auto connect the server to monitor on launch, using `User Data` launch scripts. +-- Auto connect the server to Komodo on launch, using `User Data` launch scripts. diff --git a/docsite/docs/sync-resources.md b/docsite/docs/sync-resources.md index 577eb41fa..2b1ad8bbe 100644 --- a/docsite/docs/sync-resources.md +++ b/docsite/docs/sync-resources.md @@ -1,6 +1,6 @@ # Sync Resources -Monitor is able to create, update, delete, and deploy resources declared in TOML files by diffing them against the existing resources, +Komodo is able to create, update, delete, and deploy resources declared in TOML files by diffing them against the existing resources, and apply updates based on the diffs. Push the files to a remote git repo and create a `ResourceSync` pointing to the repo, and the core backend will poll for any updates (you can also manually trigger an update poll / execution in the UI). @@ -16,13 +16,13 @@ automatically execute syncs upon pushes to the configured branch. ### Server -- [Server config schema](https://docs.rs/monitor_client/latest/monitor_client/entities/server/struct.ServerConfig.html) +- [Server config schema](https://docs.rs/komodo_client/latest/komodo_client/entities/server/struct.ServerConfig.html) ```toml [[server]] # Declare a new server -name = "server-01" -description = "the main mogh server" -tags = ["monitor"] +name = "server-prod" +description = "the prod server" +tags = ["prod"] config.address = "http://localhost:8120" config.region = "AshburnDc1" config.enabled = true # default: false @@ -30,8 +30,8 @@ config.enabled = true # default: false ### Builder and build -- [Builder config schema](https://docs.rs/monitor_client/latest/monitor_client/entities/builder/struct.BuilderConfig.html) -- [Build config schema](https://docs.rs/monitor_client/latest/monitor_client/entities/build/struct.BuildConfig.html) +- [Builder config schema](https://docs.rs/komodo_client/latest/komodo_client/entities/builder/struct.BuilderConfig.html) +- [Build config schema](https://docs.rs/komodo_client/latest/komodo_client/entities/build/struct.BuildConfig.html) ```toml [[builder]] # Declare a builder @@ -73,7 +73,7 @@ org.opencontainers.image.licenses = GPL-3.0""" ### Deployments -- [Deployment config schema](https://docs.rs/monitor_client/latest/monitor_client/entities/deployment/struct.DeploymentConfig.html) +- [Deployment config schema](https://docs.rs/komodo_client/latest/komodo_client/entities/deployment/struct.DeploymentConfig.html) ```toml [[variable]] # Declare variables @@ -131,7 +131,7 @@ config.labels = "deployment.type = logger" ### Stack -- [Stack config schema](https://docs.rs/monitor_client/latest/monitor_client/entities/stack/struct.StackConfig.html) +- [Stack config schema](https://docs.rs/komodo_client/latest/komodo_client/entities/stack/struct.StackConfig.html) ```toml [[stack]] @@ -140,7 +140,7 @@ description = "stack test" deploy = true after = ["test-logger-01"] # Stacks can depend on deployments, and vice versa. 
tags = ["test"] -config.server_id = "monitor-01" +config.server_id = "server-prod" config.file_paths = ["mongo.yaml", "redis.yaml"] config.git_provider = "git.mogh.tech" config.git_account = "mbecker20" # clone private repo by specifying account @@ -149,7 +149,7 @@ config.repo = "mbecker20/stack_test" ### Procedure -- [Procedure config schema](https://docs.rs/monitor_client/latest/monitor_client/entities/procedure/struct.ProcedureConfig.html) +- [Procedure config schema](https://docs.rs/komodo_client/latest/komodo_client/entities/procedure/struct.ProcedureConfig.html) ```toml [[procedure]] @@ -164,7 +164,7 @@ enabled = true # The executions within a stage will be run in parallel. The stage completes when all executions finish. executions = [ { execution.type = "RunBuild", execution.params.build = "test_logger", enabled = true }, - { execution.type = "PullRepo", execution.params.repo = "monitor-periphery", enabled = true }, + { execution.type = "PullRepo", execution.params.repo = "komodo-periphery", enabled = true }, ] [[procedure.config.stage]] @@ -184,25 +184,27 @@ executions = [ ### Repo -- [Repo config schema](https://docs.rs/monitor_client/latest/monitor_client/entities/repo/struct.RepoConfig.html) +- [Repo config schema](https://docs.rs/komodo_client/latest/komodo_client/entities/repo/struct.RepoConfig.html) ```toml [[repo]] -name = "monitor-periphery" +name = "komodo-periphery" description = "Builds new versions of the periphery binary. Requires Rust installed on the host." -tags = ["monitor"] +tags = ["komodo"] config.server_id = "server-01" config.git_provider = "git.mogh.tech" # use an alternate git provider (default is github.com) config.git_account = "mbecker20" -config.repo = "mbecker20/monitor" +config.repo = "mbecker20/komodo" # Run an action after the repo is pulled config.on_pull.path = "." -config.on_pull.command = "/root/.cargo/bin/cargo build -p monitor_periphery --release && cp ./target/release/periphery /root/periphery" +config.on_pull.command = """ +/root/.cargo/bin/cargo build -p komodo_periphery --release && \ +cp ./target/release/periphery /root/periphery""" ``` ### User Group: -- [UserGroup schema](https://docs.rs/monitor_client/latest/monitor_client/entities/toml/struct.UserGroupToml.html) +- [UserGroup schema](https://docs.rs/komodo_client/latest/komodo_client/entities/toml/struct.UserGroupToml.html) ```toml [[user_group]] @@ -213,7 +215,7 @@ all.Build = "Execute" all.Alerter = "Write" permissions = [ # Attach permissions to specific resources by name - { target.type = "Repo", target.id = "monitor-periphery", level = "Execute" }, + { target.type = "Repo", target.id = "komodo-periphery", level = "Execute" }, # Attach permissions to many resources with name matching regex (this uses '^(.+)-(.+)$' as regex expression) { target.type = "Server", target.id = "\\^(.+)-(.+)$\\", level = "Read" }, { target.type = "Deployment", target.id = "\\^immich\\", level = "Execute" }, diff --git a/docsite/docs/version-upgrades.md b/docsite/docs/version-upgrades.md index f7d38ec11..f92ed43bb 100644 --- a/docsite/docs/version-upgrades.md +++ b/docsite/docs/version-upgrades.md @@ -1,7 +1,7 @@ # Version Upgrades -Most version upgrades only require a redeployment of the core container after pulling the latest version, and are fully backward compatible with the periphery clients, which may be updated later on as convenient. This is the default, and will be the case unless specifically mentioned in the [version release notes](https://github.com/mbecker20/monitor/releases). 
+Most version upgrades only require a redeployment of the Core container after pulling the latest version, and are fully backward compatible with the periphery clients, which may be updated later on as convenient. This is the default, and will be the case unless specifically mentioned in the [version release notes](https://github.com/mbecker20/komodo/releases). Some Core API upgrades may change behavior such as building / cloning, and require updating the Periphery binaries to match the Core version before this functionality can be restored. This will be specifically mentioned in the release notes. -Additionally, some Core API upgrades may include database schema changes, and require a database migration. This can be accomplished by using the [monitor migrator](https://github.com/mbecker20/monitor/blob/main/bin/migrator/README.md) for the particular version upgrade before upgrading the Core API container. \ No newline at end of file +Additionally, some Core API upgrades may include database schema changes, and require a database migration. This can be accomplished by using the [komodo migrator](https://github.com/mbecker20/komodo/blob/main/bin/migrator/README.md) for the particular version upgrade before upgrading the Core API container. \ No newline at end of file diff --git a/docsite/docs/webhooks.md b/docsite/docs/webhooks.md index 7a0f1638f..565987f7c 100644 --- a/docsite/docs/webhooks.md +++ b/docsite/docs/webhooks.md @@ -1,6 +1,6 @@ # Configuring Webhooks -Multiple Monitor resources can take advantage of webhooks from your git provider. Monitor supports incoming webhooks using the Github standard, which is also supported by other providers like Gitea. +Multiple Komodo resources can take advantage of webhooks from your git provider. Komodo supports incoming webhooks using the Github standard, which is also supported by other providers like Gitea. :::note On Gitea, the default "Gitea" webhook type works with the Github standard 👍 @@ -20,16 +20,29 @@ You will have to input some information. 1. The `Payload URL` is the link that you copied in the step above, `Copy the Resource Payload URL`. 2. For Content-type, choose `application/json` -3. For Secret, input the secret you configured in the Monitor Core config (`MONITOR_WEBHOOK_SECRET`). +3. For Secret, input the secret you configured in the Komodo Core config (`KOMODO_WEBHOOK_SECRET`). 4. Enable SSL Verification, if you have proper TLS setup to your git provider (recommended). 5. For "events that trigger the webhook", just the push request is what post people want. 6. Of course, make sure the webhook is "Active" and hit create. ## When does it trigger? -Your git provider will now push this webhook to Monitor on *every* push to *any* branch. However, your `Build`, `Repo`, +Your git provider will now push this webhook to Komodo on *every* push to *any* branch. However, your `Build`, `Repo`, etc. only cares about a specific branch of the repo. Because of this, the webhook will trigger the action **only on pushes to the branch configured on the resource**. -For example, if I make a build, I may point the build to the `release` branch of a particular repo. If I set up a webhook, and push to the `main` branch, the action will *not trigger*. It will only trigger when the push is to the `release` branch. \ No newline at end of file +For example, if I make a build, I may point the build to the `release` branch of a particular repo. If I set up a webhook, and push to the `main` branch, the action will *not trigger*. 
It will only trigger when the push is to the `release` branch. + +## Procedure webhooks + +Not all actions support webhooks directly, however for those that don't, they can still be triggered via webhook by using a Procedure. Just create a Procedure and configure it to run the action you are looking for, and create a webhook pointing to the Procedure. + +Since Procedures don't specificy a particular branch it should listen for pushes on, this information +must be put in the webhook payload url. Procedures use webhook payload urls of the form: + +``` +/listener/github/procedure// +``` + +If the `` is not provided, it will default to listening on the `main` branch. \ No newline at end of file diff --git a/docsite/docusaurus.config.ts b/docsite/docusaurus.config.ts index e0bae46ff..076f66f5b 100644 --- a/docsite/docusaurus.config.ts +++ b/docsite/docusaurus.config.ts @@ -6,21 +6,21 @@ import dotenv from "dotenv" dotenv.config(); const config: Config = { - title: "Monitor", + title: "Komodo", tagline: "Build and deployment system", favicon: "img/favicon.ico", // Set the production url of your site here - url: "https://docs.monitor.dev", + url: "https://komo.do", // Set the // pathname under which your site is served // For GitHub pages deployment, it is often '//' - // baseUrl: "/monitor/", + // baseUrl: "/komodo/", baseUrl: "/", // GitHub pages deployment config. // If you aren't using GitHub pages, you don't need these. organizationName: "mbecker20", // Usually your GitHub org/user name. - projectName: "monitor", // Usually your repo name. + projectName: "komodo", // Usually your repo name. trailingSlash: false, deploymentBranch: "gh-pages-docs", @@ -69,7 +69,7 @@ const config: Config = { }, }, navbar: { - title: "Monitor", + title: "Komodo", logo: { alt: "monitor lizard", src: "img/logo512.png", @@ -83,13 +83,13 @@ const config: Config = { label: "docs", }, { - href: "https://docs.rs/monitor_client/latest/monitor_client/", - label: "docs.rs", + href: "https://docs.rs/komodo_client/latest/komodo_client/", + label: "Docs.rs", position: "right", }, { - href: "https://github.com/mbecker20/monitor", - label: "github", + href: "https://github.com/mbecker20/komodo", + label: "Github", position: "right", }, ], diff --git a/docsite/package-lock.json b/docsite/package-lock.json index 3d00925bf..4528ac8a4 100644 --- a/docsite/package-lock.json +++ b/docsite/package-lock.json @@ -8,8 +8,8 @@ "name": "docsite", "version": "0.0.0", "dependencies": { - "@docusaurus/core": "^3.4.0", - "@docusaurus/preset-classic": "^3.4.0", + "@docusaurus/core": "^3.5.2", + "@docusaurus/preset-classic": "^3.5.2", "@mdx-js/react": "^3.0.1", "clsx": "^2.1.1", "prism-react-renderer": "^2.3.1", @@ -17,9 +17,9 @@ "react-dom": "^18.3.1" }, "devDependencies": { - "@docusaurus/module-type-aliases": "^3.4.0", - "@docusaurus/tsconfig": "^3.4.0", - "@docusaurus/types": "^3.4.0", + "@docusaurus/module-type-aliases": "^3.5.2", + "@docusaurus/tsconfig": "^3.5.2", + "@docusaurus/types": "^3.5.2", "dotenv": "^16.4.5", "typescript": "^5.4.5" }, @@ -69,74 +69,125 @@ } }, "node_modules/@algolia/cache-browser-local-storage": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.23.3.tgz", - "integrity": "sha512-vRHXYCpPlTDE7i6UOy2xE03zHF2C8MEFjPN2v7fRbqVpcOvAUQK81x3Kc21xyb5aSIpYCjWCZbYZuz8Glyzyyg==", + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.24.0.tgz", + "integrity": 
"sha512-t63W9BnoXVrGy9iYHBgObNXqYXM3tYXCjDSHeNwnsc324r4o5UiVKUiAB4THQ5z9U5hTj6qUvwg/Ez43ZD85ww==", "dependencies": { - "@algolia/cache-common": "4.23.3" + "@algolia/cache-common": "4.24.0" } }, "node_modules/@algolia/cache-common": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.23.3.tgz", - "integrity": "sha512-h9XcNI6lxYStaw32pHpB1TMm0RuxphF+Ik4o7tcQiodEdpKK+wKufY6QXtba7t3k8eseirEMVB83uFFF3Nu54A==" + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.24.0.tgz", + "integrity": "sha512-emi+v+DmVLpMGhp0V9q9h5CdkURsNmFC+cOS6uK9ndeJm9J4TiqSvPYVu+THUP8P/S08rxf5x2P+p3CfID0Y4g==" }, "node_modules/@algolia/cache-in-memory": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.23.3.tgz", - "integrity": "sha512-yvpbuUXg/+0rbcagxNT7un0eo3czx2Uf0y4eiR4z4SD7SiptwYTpbuS0IHxcLHG3lq22ukx1T6Kjtk/rT+mqNg==", + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.24.0.tgz", + "integrity": "sha512-gDrt2so19jW26jY3/MkFg5mEypFIPbPoXsQGQWAi6TrCPsNOSEYepBMPlucqWigsmEy/prp5ug2jy/N3PVG/8w==", "dependencies": { - "@algolia/cache-common": "4.23.3" + "@algolia/cache-common": "4.24.0" } }, "node_modules/@algolia/client-account": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.23.3.tgz", - "integrity": "sha512-hpa6S5d7iQmretHHF40QGq6hz0anWEHGlULcTIT9tbUssWUriN9AUXIFQ8Ei4w9azD0hc1rUok9/DeQQobhQMA==", + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.24.0.tgz", + "integrity": "sha512-adcvyJ3KjPZFDybxlqnf+5KgxJtBjwTPTeyG2aOyoJvx0Y8dUQAEOEVOJ/GBxX0WWNbmaSrhDURMhc+QeevDsA==", "dependencies": { - "@algolia/client-common": "4.23.3", - "@algolia/client-search": "4.23.3", - "@algolia/transporter": "4.23.3" + "@algolia/client-common": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-account/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-account/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" } }, "node_modules/@algolia/client-analytics": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.23.3.tgz", - "integrity": "sha512-LBsEARGS9cj8VkTAVEZphjxTjMVCci+zIIiRhpFun9jGDUlS1XmhCW7CTrnaWeIuCQS/2iPyRqSy1nXPjcBLRA==", + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.24.0.tgz", + "integrity": "sha512-y8jOZt1OjwWU4N2qr8G4AxXAzaa8DBvyHTWlHzX/7Me1LX8OayfgHexqrsL4vSBcoMmVw2XnVW9MhL+Y2ZDJXg==", "dependencies": { - "@algolia/client-common": "4.23.3", - "@algolia/client-search": "4.23.3", - 
"@algolia/requester-common": "4.23.3", - "@algolia/transporter": "4.23.3" + "@algolia/client-common": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-analytics/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-analytics/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" } }, "node_modules/@algolia/client-common": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.23.3.tgz", - "integrity": "sha512-l6EiPxdAlg8CYhroqS5ybfIczsGUIAC47slLPOMDeKSVXYG1n0qGiz4RjAHLw2aD0xzh2EXZ7aRguPfz7UKDKw==", - "dependencies": { - "@algolia/requester-common": "4.23.3", - "@algolia/transporter": "4.23.3" + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-5.2.3.tgz", + "integrity": "sha512-zqfcbgjYR72Y/rx/+/6g5Li/eV33yhRq5mkGbU06JYBzvGq6viy0gZl1ckCFhLLifKzXZ4yzUQTw/KG6FV+smg==", + "peer": true, + "engines": { + "node": ">= 14.0.0" } }, "node_modules/@algolia/client-personalization": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.23.3.tgz", - "integrity": "sha512-3E3yF3Ocr1tB/xOZiuC3doHQBQ2zu2MPTYZ0d4lpfWads2WTKG7ZzmGnsHmm63RflvDeLK/UVx7j2b3QuwKQ2g==", + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.24.0.tgz", + "integrity": "sha512-l5FRFm/yngztweU0HdUzz1rC4yoWCFo3IF+dVIVTfEPg906eZg5BOd1k0K6rZx5JzyyoP4LdmOikfkfGsKVE9w==", "dependencies": { - "@algolia/client-common": "4.23.3", - "@algolia/requester-common": "4.23.3", - "@algolia/transporter": "4.23.3" + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/client-personalization/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" } }, "node_modules/@algolia/client-search": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.23.3.tgz", - "integrity": "sha512-P4VAKFHqU0wx9O+q29Q8YVuaowaZ5EM77rxfmGnkHUJggh28useXQdopokgwMeYw2XUht49WX5RcTQ40rZIabw==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-5.2.3.tgz", + "integrity": "sha512-xXdCg8vpiwE8gqSyvjxq8V3qbFa+gHasY5epIz718IByWv3WKLLi/n4SMIfB/zRwXTLVWeGOH/UJSz5VCnAAqg==", + "peer": true, "dependencies": { - "@algolia/client-common": 
"4.23.3", - "@algolia/requester-common": "4.23.3", - "@algolia/transporter": "4.23.3" + "@algolia/client-common": "5.2.3", + "@algolia/requester-browser-xhr": "5.2.3", + "@algolia/requester-node-http": "5.2.3" + }, + "engines": { + "node": ">= 14.0.0" } }, "node_modules/@algolia/events": { @@ -145,65 +196,108 @@ "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==" }, "node_modules/@algolia/logger-common": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.23.3.tgz", - "integrity": "sha512-y9kBtmJwiZ9ZZ+1Ek66P0M68mHQzKRxkW5kAAXYN/rdzgDN0d2COsViEFufxJ0pb45K4FRcfC7+33YB4BLrZ+g==" + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.24.0.tgz", + "integrity": "sha512-LLUNjkahj9KtKYrQhFKCzMx0BY3RnNP4FEtO+sBybCjJ73E8jNdaKJ/Dd8A/VA4imVHP5tADZ8pn5B8Ga/wTMA==" }, "node_modules/@algolia/logger-console": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.23.3.tgz", - "integrity": "sha512-8xoiseoWDKuCVnWP8jHthgaeobDLolh00KJAdMe9XPrWPuf1by732jSpgy2BlsLTaT9m32pHI8CRfrOqQzHv3A==", + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.24.0.tgz", + "integrity": "sha512-X4C8IoHgHfiUROfoRCV+lzSy+LHMgkoEEU1BbKcsfnV0i0S20zyy0NLww9dwVHUWNfPPxdMU+/wKmLGYf96yTg==", "dependencies": { - "@algolia/logger-common": "4.23.3" + "@algolia/logger-common": "4.24.0" } }, "node_modules/@algolia/recommend": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/recommend/-/recommend-4.23.3.tgz", - "integrity": "sha512-9fK4nXZF0bFkdcLBRDexsnGzVmu4TSYZqxdpgBW2tEyfuSSY54D4qSRkLmNkrrz4YFvdh2GM1gA8vSsnZPR73w==", + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/recommend/-/recommend-4.24.0.tgz", + "integrity": "sha512-P9kcgerfVBpfYHDfVZDvvdJv0lEoCvzNlOy2nykyt5bK8TyieYyiD0lguIJdRZZYGre03WIAFf14pgE+V+IBlw==", "dependencies": { - "@algolia/cache-browser-local-storage": "4.23.3", - "@algolia/cache-common": "4.23.3", - "@algolia/cache-in-memory": "4.23.3", - "@algolia/client-common": "4.23.3", - "@algolia/client-search": "4.23.3", - "@algolia/logger-common": "4.23.3", - "@algolia/logger-console": "4.23.3", - "@algolia/requester-browser-xhr": "4.23.3", - "@algolia/requester-common": "4.23.3", - "@algolia/requester-node-http": "4.23.3", - "@algolia/transporter": "4.23.3" + "@algolia/cache-browser-local-storage": "4.24.0", + "@algolia/cache-common": "4.24.0", + "@algolia/cache-in-memory": "4.24.0", + "@algolia/client-common": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/logger-common": "4.24.0", + "@algolia/logger-console": "4.24.0", + "@algolia/requester-browser-xhr": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/requester-node-http": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + 
"integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/requester-browser-xhr": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz", + "integrity": "sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/@algolia/recommend/node_modules/@algolia/requester-node-http": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", + "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", + "dependencies": { + "@algolia/requester-common": "4.24.0" } }, "node_modules/@algolia/requester-browser-xhr": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.23.3.tgz", - "integrity": "sha512-jDWGIQ96BhXbmONAQsasIpTYWslyjkiGu0Quydjlowe+ciqySpiDUrJHERIRfELE5+wFc7hc1Q5hqjGoV7yghw==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-5.2.3.tgz", + "integrity": "sha512-lezcE4E7ax7JkDGDKA/xAnyAY9p9LZ4AxzsyL0pksqUpOvn4U0msP553M2yJRfsxxdGDp15noCnPuRsh7u8dMg==", + "peer": true, "dependencies": { - "@algolia/requester-common": "4.23.3" + "@algolia/client-common": "5.2.3" + }, + "engines": { + "node": ">= 14.0.0" } }, "node_modules/@algolia/requester-common": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.23.3.tgz", - "integrity": "sha512-xloIdr/bedtYEGcXCiF2muajyvRhwop4cMZo+K2qzNht0CMzlRkm8YsDdj5IaBhshqfgmBb3rTg4sL4/PpvLYw==" + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.24.0.tgz", + "integrity": "sha512-k3CXJ2OVnvgE3HMwcojpvY6d9kgKMPRxs/kVohrwF5WMr2fnqojnycZkxPoEg+bXm8fi5BBfFmOqgYztRtHsQA==" }, "node_modules/@algolia/requester-node-http": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.23.3.tgz", - "integrity": "sha512-zgu++8Uj03IWDEJM3fuNl34s746JnZOWn1Uz5taV1dFyJhVM/kTNw9Ik7YJWiUNHJQXcaD8IXD1eCb0nq/aByA==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-5.2.3.tgz", + "integrity": "sha512-xTxsRnJqxG1dylIkxmflrHO9LJfJKjSHqEF5yGdRrtnqIEvb2hiQPCHm2XwqxMa3NBcf6lmydGfJqhPLnRJwtw==", + "peer": true, "dependencies": { - "@algolia/requester-common": "4.23.3" + "@algolia/client-common": "5.2.3" + }, + "engines": { + "node": ">= 14.0.0" } }, "node_modules/@algolia/transporter": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.23.3.tgz", - "integrity": "sha512-Wjl5gttqnf/gQKJA+dafnD0Y6Yw97yvfY8R9h0dQltX1GXTgNs1zWgvtWW0tHl1EgMdhAyw189uWiZMnL3QebQ==", + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.24.0.tgz", + "integrity": "sha512-86nI7w6NzWxd1Zp9q3413dRshDqAzSbsQjhcDhPIatEFiZrL1/TjnHL8S7jVKFePlIMzDsZWXAXwXzcok9c5oA==", "dependencies": { - "@algolia/cache-common": "4.23.3", - "@algolia/logger-common": "4.23.3", - 
"@algolia/requester-common": "4.23.3" + "@algolia/cache-common": "4.24.0", + "@algolia/logger-common": "4.24.0", + "@algolia/requester-common": "4.24.0" } }, "node_modules/@ampproject/remapping": { @@ -486,9 +580,9 @@ } }, "node_modules/@babel/helper-plugin-utils": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.7.tgz", - "integrity": "sha512-Rq76wjt7yz9AAc1KnlRKNAi/dMSVWgDRx43FHoJEbcYU6xOWaE2dVPwcdTukJrjxS65GITyfbvEYHvkirZ6uEg==", + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.8.tgz", + "integrity": "sha512-FFWx5142D8h2Mgr/iPVGH5G7w6jDn4jUSpZTyDnQO0Yn7Ks2Kuz6Pci8H6MPCoUJegd/UZQ3tAvfLCxQSnWWwg==", "engines": { "node": ">=6.9.0" } @@ -1578,11 +1672,11 @@ } }, "node_modules/@babel/plugin-transform-react-constant-elements": { - "version": "7.24.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.24.7.tgz", - "integrity": "sha512-7LidzZfUXyfZ8/buRW6qIIHBY8wAZ1OrY9c/wTr8YhZ6vMPo+Uc/CVFLYY1spZrEQlD4w5u8wjqk5NQ3OVqQKA==", + "version": "7.25.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.25.1.tgz", + "integrity": "sha512-SLV/giH/V4SmloZ6Dt40HjTGTAIkxn33TVIHxNGNvo8ezMhrxBkzisj4op1KZYPIOHFLqhv60OHvX+YRu4xbmQ==", "dependencies": { - "@babel/helper-plugin-utils": "^7.24.7" + "@babel/helper-plugin-utils": "^7.24.8" }, "engines": { "node": ">=6.9.0" @@ -2099,18 +2193,18 @@ } }, "node_modules/@docsearch/css": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.6.0.tgz", - "integrity": "sha512-+sbxb71sWre+PwDK7X2T8+bhS6clcVMLwBPznX45Qu6opJcgRjAp7gYSDzVFp187J+feSj5dNBN1mJoi6ckkUQ==" + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/@docsearch/css/-/css-3.6.1.tgz", + "integrity": "sha512-VtVb5DS+0hRIprU2CO6ZQjK2Zg4QU5HrDM1+ix6rT0umsYvFvatMAnf97NHZlVWDaaLlx7GRfR/7FikANiM2Fg==" }, "node_modules/@docsearch/react": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.6.0.tgz", - "integrity": "sha512-HUFut4ztcVNmqy9gp/wxNbC7pTOHhgVVkHVGCACTuLhUKUhKAF9KYHJtMiLUJxEqiFLQiuri1fWF8zqwM/cu1w==", + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/@docsearch/react/-/react-3.6.1.tgz", + "integrity": "sha512-qXZkEPvybVhSXj0K7U3bXc233tk5e8PfhoZ6MhPOiik/qUQxYC+Dn9DnoS7CxHQQhHfCvTiN0eY9M12oRghEXw==", "dependencies": { "@algolia/autocomplete-core": "1.9.3", "@algolia/autocomplete-preset-algolia": "1.9.3", - "@docsearch/css": "3.6.0", + "@docsearch/css": "3.6.1", "algoliasearch": "^4.19.1" }, "peerDependencies": { @@ -2135,9 +2229,9 @@ } }, "node_modules/@docusaurus/core": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.4.0.tgz", - "integrity": "sha512-g+0wwmN2UJsBqy2fQRQ6fhXruoEa62JDeEa5d8IdTJlMoaDaEDfHh7WjwGRn4opuTQWpjAwP/fbcgyHKlE+64w==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.5.2.tgz", + "integrity": "sha512-4Z1WkhCSkX4KO0Fw5m/Vuc7Q3NxBG53NE5u59Rs96fWkMPZVSrzEPP16/Nk6cWb/shK7xXPndTmalJtw7twL/w==", "dependencies": { "@babel/core": "^7.23.3", "@babel/generator": "^7.23.3", @@ -2149,12 +2243,12 @@ "@babel/runtime": "^7.22.6", "@babel/runtime-corejs3": "^7.22.6", "@babel/traverse": "^7.22.8", - "@docusaurus/cssnano-preset": "3.4.0", - "@docusaurus/logger": "3.4.0", - "@docusaurus/mdx-loader": "3.4.0", - 
"@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/cssnano-preset": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "autoprefixer": "^10.4.14", "babel-loader": "^9.1.3", "babel-plugin-dynamic-import-node": "^2.3.3", @@ -2215,14 +2309,15 @@ "node": ">=18.0" }, "peerDependencies": { + "@mdx-js/react": "^3.0.0", "react": "^18.0.0", "react-dom": "^18.0.0" } }, "node_modules/@docusaurus/cssnano-preset": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.4.0.tgz", - "integrity": "sha512-qwLFSz6v/pZHy/UP32IrprmH5ORce86BGtN0eBtG75PpzQJAzp9gefspox+s8IEOr0oZKuQ/nhzZ3xwyc3jYJQ==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.5.2.tgz", + "integrity": "sha512-D3KiQXOMA8+O0tqORBrTOEQyQxNIfPm9jEaJoALjjSjc2M/ZAWcUfPQEnwr2JB2TadHw2gqWgpZckQmrVWkytA==", "dependencies": { "cssnano-preset-advanced": "^6.1.2", "postcss": "^8.4.38", @@ -2234,9 +2329,9 @@ } }, "node_modules/@docusaurus/logger": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.4.0.tgz", - "integrity": "sha512-bZwkX+9SJ8lB9kVRkXw+xvHYSMGG4bpYHKGXeXFvyVc79NMeeBSGgzd4TQLHH+DYeOJoCdl8flrFJVxlZ0wo/Q==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.5.2.tgz", + "integrity": "sha512-LHC540SGkeLfyT3RHK3gAMK6aS5TRqOD4R72BEU/DE2M/TY8WwEUAMY576UUc/oNJXv8pGhBmQB6N9p3pt8LQw==", "dependencies": { "chalk": "^4.1.2", "tslib": "^2.6.0" @@ -2246,13 +2341,13 @@ } }, "node_modules/@docusaurus/mdx-loader": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.4.0.tgz", - "integrity": "sha512-kSSbrrk4nTjf4d+wtBA9H+FGauf2gCax89kV8SUSJu3qaTdSIKdWERlngsiHaCFgZ7laTJ8a67UFf+xlFPtuTw==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.5.2.tgz", + "integrity": "sha512-ku3xO9vZdwpiMIVd8BzWV0DCqGEbCP5zs1iHfKX50vw6jX8vQo0ylYo1YJMZyz6e+JFJ17HYHT5FzVidz2IflA==", "dependencies": { - "@docusaurus/logger": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "@mdx-js/mdx": "^3.0.0", "@slorber/remark-comment": "^1.0.0", "escape-html": "^1.0.3", @@ -2284,11 +2379,11 @@ } }, "node_modules/@docusaurus/module-type-aliases": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.4.0.tgz", - "integrity": "sha512-A1AyS8WF5Bkjnb8s+guTDuYmUiwJzNrtchebBHpc0gz0PyHJNMaybUlSrmJjHVcGrya0LKI4YcR3lBDQfXRYLw==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.5.2.tgz", + "integrity": "sha512-Z+Xu3+2rvKef/YKTMxZHsEXp1y92ac0ngjDiExRdqGTmEKtCUpkbNYH8v5eXo5Ls+dnW88n6WTa+Q54kLOkwPg==", "dependencies": { - "@docusaurus/types": "3.4.0", + "@docusaurus/types": "3.5.2", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2302,18 +2397,19 @@ } }, "node_modules/@docusaurus/plugin-content-blog": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.4.0.tgz", - "integrity": 
"sha512-vv6ZAj78ibR5Jh7XBUT4ndIjmlAxkijM3Sx5MAAzC1gyv0vupDQNhzuFg1USQmQVj3P5I6bquk12etPV3LJ+Xw==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.5.2.tgz", + "integrity": "sha512-R7ghWnMvjSf+aeNDH0K4fjyQnt5L0KzUEnUhmf1e3jZrv3wogeytZNN6n7X8yHcMsuZHPOrctQhXWnmxu+IRRg==", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/logger": "3.4.0", - "@docusaurus/mdx-loader": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", - "cheerio": "^1.0.0-rc.12", + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", + "cheerio": "1.0.0-rc.12", "feed": "^4.2.2", "fs-extra": "^11.1.1", "lodash": "^4.17.21", @@ -2328,23 +2424,25 @@ "node": ">=18.0" }, "peerDependencies": { + "@docusaurus/plugin-content-docs": "*", "react": "^18.0.0", "react-dom": "^18.0.0" } }, "node_modules/@docusaurus/plugin-content-docs": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.4.0.tgz", - "integrity": "sha512-HkUCZffhBo7ocYheD9oZvMcDloRnGhBMOZRyVcAQRFmZPmNqSyISlXA1tQCIxW+r478fty97XXAGjNYzBjpCsg==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.5.2.tgz", + "integrity": "sha512-Bt+OXn/CPtVqM3Di44vHjE7rPCEsRCB/DMo2qoOuozB9f7+lsdrHvD0QCHdBs0uhz6deYJDppAr2VgqybKPlVQ==", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/logger": "3.4.0", - "@docusaurus/mdx-loader": "3.4.0", - "@docusaurus/module-type-aliases": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "@types/react-router-config": "^5.0.7", "combine-promises": "^1.1.0", "fs-extra": "^11.1.1", @@ -2363,15 +2461,15 @@ } }, "node_modules/@docusaurus/plugin-content-pages": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.4.0.tgz", - "integrity": "sha512-h2+VN/0JjpR8fIkDEAoadNjfR3oLzB+v1qSXbIAKjQ46JAHx3X22n9nqS+BWSQnTnp1AjkjSvZyJMekmcwxzxg==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.5.2.tgz", + "integrity": "sha512-WzhHjNpoQAUz/ueO10cnundRz+VUtkjFhhaQ9jApyv1a46FPURO4cef89pyNIOMny1fjDz/NUN2z6Yi+5WUrCw==", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/mdx-loader": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "fs-extra": "^11.1.1", "tslib": "^2.6.0", "webpack": "^5.88.1" @@ -2385,13 +2483,13 @@ } }, "node_modules/@docusaurus/plugin-debug": { - "version": "3.4.0", - "resolved": 
"https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.4.0.tgz", - "integrity": "sha512-uV7FDUNXGyDSD3PwUaf5YijX91T5/H9SX4ErEcshzwgzWwBtK37nUWPU3ZLJfeTavX3fycTOqk9TglpOLaWkCg==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.5.2.tgz", + "integrity": "sha512-kBK6GlN0itCkrmHuCS6aX1wmoWc5wpd5KJlqQ1FyrF0cLDnvsYSnh7+ftdwzt7G6lGBho8lrVwkkL9/iQvaSOA==", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", "fs-extra": "^11.1.1", "react-json-view-lite": "^1.2.0", "tslib": "^2.6.0" @@ -2405,13 +2503,13 @@ } }, "node_modules/@docusaurus/plugin-google-analytics": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.4.0.tgz", - "integrity": "sha512-mCArluxEGi3cmYHqsgpGGt3IyLCrFBxPsxNZ56Mpur0xSlInnIHoeLDH7FvVVcPJRPSQ9/MfRqLsainRw+BojA==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.5.2.tgz", + "integrity": "sha512-rjEkJH/tJ8OXRE9bwhV2mb/WP93V441rD6XnM6MIluu7rk8qg38iSxS43ga2V2Q/2ib53PcqbDEJDG/yWQRJhQ==", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "tslib": "^2.6.0" }, "engines": { @@ -2423,13 +2521,13 @@ } }, "node_modules/@docusaurus/plugin-google-gtag": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.4.0.tgz", - "integrity": "sha512-Dsgg6PLAqzZw5wZ4QjUYc8Z2KqJqXxHxq3vIoyoBWiLEEfigIs7wHR+oiWUQy3Zk9MIk6JTYj7tMoQU0Jm3nqA==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.5.2.tgz", + "integrity": "sha512-lm8XL3xLkTPHFKKjLjEEAHUrW0SZBSHBE1I+i/tmYMBsjCcUB5UJ52geS5PSiOCFVR74tbPGcPHEV/gaaxFeSA==", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "@types/gtag.js": "^0.0.12", "tslib": "^2.6.0" }, @@ -2442,13 +2540,13 @@ } }, "node_modules/@docusaurus/plugin-google-tag-manager": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.4.0.tgz", - "integrity": "sha512-O9tX1BTwxIhgXpOLpFDueYA9DWk69WCbDRrjYoMQtFHSkTyE7RhNgyjSPREUWJb9i+YUg3OrsvrBYRl64FCPCQ==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.5.2.tgz", + "integrity": "sha512-QkpX68PMOMu10Mvgvr5CfZAzZQFx8WLlOiUQ/Qmmcl6mjGK6H21WLT5x7xDmcpCoKA/3CegsqIqBR+nA137lQg==", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "tslib": "^2.6.0" }, "engines": { @@ -2460,16 +2558,16 @@ } }, "node_modules/@docusaurus/plugin-sitemap": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.4.0.tgz", - "integrity": 
"sha512-+0VDvx9SmNrFNgwPoeoCha+tRoAjopwT0+pYO1xAbyLcewXSemq+eLxEa46Q1/aoOaJQ0qqHELuQM7iS2gp33Q==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.5.2.tgz", + "integrity": "sha512-DnlqYyRAdQ4NHY28TfHuVk414ft2uruP4QWCH//jzpHjqvKyXjj2fmDtI8RPUBh9K8iZKFMHRnLtzJKySPWvFA==", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/logger": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "fs-extra": "^11.1.1", "sitemap": "^7.1.1", "tslib": "^2.6.0" @@ -2483,23 +2581,23 @@ } }, "node_modules/@docusaurus/preset-classic": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.4.0.tgz", - "integrity": "sha512-Ohj6KB7siKqZaQhNJVMBBUzT3Nnp6eTKqO+FXO3qu/n1hJl3YLwVKTWBg28LF7MWrKu46UuYavwMRxud0VyqHg==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.5.2.tgz", + "integrity": "sha512-3ihfXQ95aOHiLB5uCu+9PRy2gZCeSZoDcqpnDvf3B+sTrMvMTr8qRUzBvWkoIqc82yG5prCboRjk1SVILKx6sg==", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/plugin-content-blog": "3.4.0", - "@docusaurus/plugin-content-docs": "3.4.0", - "@docusaurus/plugin-content-pages": "3.4.0", - "@docusaurus/plugin-debug": "3.4.0", - "@docusaurus/plugin-google-analytics": "3.4.0", - "@docusaurus/plugin-google-gtag": "3.4.0", - "@docusaurus/plugin-google-tag-manager": "3.4.0", - "@docusaurus/plugin-sitemap": "3.4.0", - "@docusaurus/theme-classic": "3.4.0", - "@docusaurus/theme-common": "3.4.0", - "@docusaurus/theme-search-algolia": "3.4.0", - "@docusaurus/types": "3.4.0" + "@docusaurus/core": "3.5.2", + "@docusaurus/plugin-content-blog": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/plugin-content-pages": "3.5.2", + "@docusaurus/plugin-debug": "3.5.2", + "@docusaurus/plugin-google-analytics": "3.5.2", + "@docusaurus/plugin-google-gtag": "3.5.2", + "@docusaurus/plugin-google-tag-manager": "3.5.2", + "@docusaurus/plugin-sitemap": "3.5.2", + "@docusaurus/theme-classic": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-search-algolia": "3.5.2", + "@docusaurus/types": "3.5.2" }, "engines": { "node": ">=18.0" @@ -2510,26 +2608,26 @@ } }, "node_modules/@docusaurus/theme-classic": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.4.0.tgz", - "integrity": "sha512-0IPtmxsBYv2adr1GnZRdMkEQt1YW6tpzrUPj02YxNpvJ5+ju4E13J5tB4nfdaen/tfR1hmpSPlTFPvTf4kwy8Q==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.5.2.tgz", + "integrity": "sha512-XRpinSix3NBv95Rk7xeMF9k4safMkwnpSgThn0UNQNumKvmcIYjfkwfh2BhwYh/BxMXQHJ/PdmNh22TQFpIaYg==", "dependencies": { - "@docusaurus/core": "3.4.0", - "@docusaurus/mdx-loader": "3.4.0", - "@docusaurus/module-type-aliases": "3.4.0", - "@docusaurus/plugin-content-blog": "3.4.0", - "@docusaurus/plugin-content-docs": "3.4.0", - "@docusaurus/plugin-content-pages": "3.4.0", - "@docusaurus/theme-common": "3.4.0", - "@docusaurus/theme-translations": "3.4.0", - "@docusaurus/types": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", - 
"@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/plugin-content-blog": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/plugin-content-pages": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-translations": "3.5.2", + "@docusaurus/types": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "copy-text-to-clipboard": "^3.2.0", - "infima": "0.2.0-alpha.43", + "infima": "0.2.0-alpha.44", "lodash": "^4.17.21", "nprogress": "^0.2.0", "postcss": "^8.4.26", @@ -2549,17 +2647,14 @@ } }, "node_modules/@docusaurus/theme-common": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.4.0.tgz", - "integrity": "sha512-0A27alXuv7ZdCg28oPE8nH/Iz73/IUejVaCazqu9elS4ypjiLhK3KfzdSQBnL/g7YfHSlymZKdiOHEo8fJ0qMA==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.5.2.tgz", + "integrity": "sha512-QXqlm9S6x9Ibwjs7I2yEDgsCocp708DrCrgHgKwg2n2AY0YQ6IjU0gAK35lHRLOvAoJUfCKpQAwUykB0R7+Eew==", "dependencies": { - "@docusaurus/mdx-loader": "3.4.0", - "@docusaurus/module-type-aliases": "3.4.0", - "@docusaurus/plugin-content-blog": "3.4.0", - "@docusaurus/plugin-content-docs": "3.4.0", - "@docusaurus/plugin-content-pages": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", + "@docusaurus/mdx-loader": "3.5.2", + "@docusaurus/module-type-aliases": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2573,23 +2668,24 @@ "node": ">=18.0" }, "peerDependencies": { + "@docusaurus/plugin-content-docs": "*", "react": "^18.0.0", "react-dom": "^18.0.0" } }, "node_modules/@docusaurus/theme-search-algolia": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.4.0.tgz", - "integrity": "sha512-aiHFx7OCw4Wck1z6IoShVdUWIjntC8FHCw9c5dR8r3q4Ynh+zkS8y2eFFunN/DL6RXPzpnvKCg3vhLQYJDmT9Q==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.5.2.tgz", + "integrity": "sha512-qW53kp3VzMnEqZGjakaV90sst3iN1o32PH+nawv1uepROO8aEGxptcq2R5rsv7aBShSRbZwIobdvSYKsZ5pqvA==", "dependencies": { "@docsearch/react": "^3.5.2", - "@docusaurus/core": "3.4.0", - "@docusaurus/logger": "3.4.0", - "@docusaurus/plugin-content-docs": "3.4.0", - "@docusaurus/theme-common": "3.4.0", - "@docusaurus/theme-translations": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-validation": "3.4.0", + "@docusaurus/core": "3.5.2", + "@docusaurus/logger": "3.5.2", + "@docusaurus/plugin-content-docs": "3.5.2", + "@docusaurus/theme-common": "3.5.2", + "@docusaurus/theme-translations": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-validation": "3.5.2", "algoliasearch": "^4.18.0", "algoliasearch-helper": "^3.13.3", "clsx": "^2.0.0", @@ -2608,9 +2704,9 @@ } }, "node_modules/@docusaurus/theme-translations": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.4.0.tgz", - "integrity": "sha512-zSxCSpmQCCdQU5Q4CnX/ID8CSUUI3fvmq4hU/GNP/XoAWtXo9SAVnM3TzpU8Gb//H3WCsT8mJcTfyOk3d9ftNg==", + "version": "3.5.2", + "resolved": 
"https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.5.2.tgz", + "integrity": "sha512-GPZLcu4aT1EmqSTmbdpVrDENGR2yObFEX8ssEFYTCiAIVc0EihNSdOIBTazUvgNqwvnoU1A8vIs1xyzc3LITTw==", "dependencies": { "fs-extra": "^11.1.1", "tslib": "^2.6.0" @@ -2620,15 +2716,15 @@ } }, "node_modules/@docusaurus/tsconfig": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/tsconfig/-/tsconfig-3.4.0.tgz", - "integrity": "sha512-0qENiJ+TRaeTzcg4olrnh0BQ7eCxTgbYWBnWUeQDc84UYkt/T3pDNnm3SiQkqPb+YQ1qtYFlC0RriAElclo8Dg==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/tsconfig/-/tsconfig-3.5.2.tgz", + "integrity": "sha512-rQ7toURCFnWAIn8ubcquDs0ewhPwviMzxh6WpRjBW7sJVCXb6yzwUaY3HMNa0VXCFw+qkIbFywrMTf+Pb4uHWQ==", "dev": true }, "node_modules/@docusaurus/types": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.4.0.tgz", - "integrity": "sha512-4jcDO8kXi5Cf9TcyikB/yKmz14f2RZ2qTRerbHAsS+5InE9ZgSLBNLsewtFTcTOXSVcbU3FoGOzcNWAmU1TR0A==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.5.2.tgz", + "integrity": "sha512-N6GntLXoLVUwkZw7zCxwy9QiuEXIcTVzA9AkmNw16oc0AP3SXLrMmDMMBIfgqwuKWa6Ox6epHol9kMtJqekACw==", "dependencies": { "@mdx-js/mdx": "^3.0.0", "@types/history": "^4.7.11", @@ -2646,12 +2742,12 @@ } }, "node_modules/@docusaurus/utils": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.4.0.tgz", - "integrity": "sha512-fRwnu3L3nnWaXOgs88BVBmG1yGjcQqZNHG+vInhEa2Sz2oQB+ZjbEMO5Rh9ePFpZ0YDiDUhpaVjwmS+AU2F14g==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.5.2.tgz", + "integrity": "sha512-33QvcNFh+Gv+C2dP9Y9xWEzMgf3JzrpL2nW9PopidiohS1nDcyknKRx2DWaFvyVTTYIkkABVSr073VTj/NITNA==", "dependencies": { - "@docusaurus/logger": "3.4.0", - "@docusaurus/utils-common": "3.4.0", + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils-common": "3.5.2", "@svgr/webpack": "^8.1.0", "escape-string-regexp": "^4.0.0", "file-loader": "^6.2.0", @@ -2684,9 +2780,9 @@ } }, "node_modules/@docusaurus/utils-common": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.4.0.tgz", - "integrity": "sha512-NVx54Wr4rCEKsjOH5QEVvxIqVvm+9kh7q8aYTU5WzUU9/Hctd6aTrcZ3G0Id4zYJ+AeaG5K5qHA4CY5Kcm2iyQ==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.5.2.tgz", + "integrity": "sha512-i0AZjHiRgJU6d7faQngIhuHKNrszpL/SHQPgF1zH4H+Ij6E9NBYGy6pkcGWToIv7IVPbs+pQLh1P3whn0gWXVg==", "dependencies": { "tslib": "^2.6.0" }, @@ -2703,13 +2799,13 @@ } }, "node_modules/@docusaurus/utils-validation": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.4.0.tgz", - "integrity": "sha512-hYQ9fM+AXYVTWxJOT1EuNaRnrR2WGpRdLDQG07O8UOpsvCPWUVOeo26Rbm0JWY2sGLfzAb+tvJ62yF+8F+TV0g==", + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.5.2.tgz", + "integrity": "sha512-m+Foq7augzXqB6HufdS139PFxDC5d5q2QKZy8q0qYYvGdI6nnlNsGH4cIGsgBnV7smz+mopl3g4asbSDvMV0jA==", "dependencies": { - "@docusaurus/logger": "3.4.0", - "@docusaurus/utils": "3.4.0", - "@docusaurus/utils-common": "3.4.0", + "@docusaurus/logger": "3.5.2", + "@docusaurus/utils": "3.5.2", + "@docusaurus/utils-common": "3.5.2", "fs-extra": "^11.2.0", "joi": "^17.9.2", "js-yaml": "^4.1.0", @@ -3297,24 +3393,6 @@ "@types/ms": "*" } }, - 
"node_modules/@types/eslint": { - "version": "8.56.10", - "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.10.tgz", - "integrity": "sha512-Shavhk87gCtY2fhXDctcfS3e6FdxWkCx1iUZ9eEUbh7rTqlZT0/IzOkCOVt0fCjcFuZ9FPYfuezTBImfHCDBGQ==", - "dependencies": { - "@types/estree": "*", - "@types/json-schema": "*" - } - }, - "node_modules/@types/eslint-scope": { - "version": "3.7.7", - "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", - "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", - "dependencies": { - "@types/eslint": "*", - "@types/estree": "*" - } - }, "node_modules/@types/estree": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", @@ -3568,9 +3646,9 @@ } }, "node_modules/@types/unist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==" }, "node_modules/@types/ws": { "version": "8.5.10", @@ -3781,10 +3859,10 @@ "node": ">=0.4.0" } }, - "node_modules/acorn-import-assertions": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz", - "integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", + "node_modules/acorn-import-attributes": { + "version": "1.9.5", + "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz", + "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==", "peerDependencies": { "acorn": "^8" } @@ -3868,31 +3946,31 @@ } }, "node_modules/algoliasearch": { - "version": "4.23.3", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.23.3.tgz", - "integrity": "sha512-Le/3YgNvjW9zxIQMRhUHuhiUjAlKY/zsdZpfq4dlLqg6mEm0nL6yk+7f2hDOtLpxsgE4jSzDmvHL7nXdBp5feg==", + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.24.0.tgz", + "integrity": "sha512-bf0QV/9jVejssFBmz2HQLxUadxk574t4iwjCKp5E7NBzwKkrDEhKPISIIjAU/p6K5qDx3qoeh4+26zWN1jmw3g==", "dependencies": { - "@algolia/cache-browser-local-storage": "4.23.3", - "@algolia/cache-common": "4.23.3", - "@algolia/cache-in-memory": "4.23.3", - "@algolia/client-account": "4.23.3", - "@algolia/client-analytics": "4.23.3", - "@algolia/client-common": "4.23.3", - "@algolia/client-personalization": "4.23.3", - "@algolia/client-search": "4.23.3", - "@algolia/logger-common": "4.23.3", - "@algolia/logger-console": "4.23.3", - "@algolia/recommend": "4.23.3", - "@algolia/requester-browser-xhr": "4.23.3", - "@algolia/requester-common": "4.23.3", - "@algolia/requester-node-http": "4.23.3", - "@algolia/transporter": "4.23.3" + "@algolia/cache-browser-local-storage": "4.24.0", + "@algolia/cache-common": "4.24.0", + "@algolia/cache-in-memory": "4.24.0", + "@algolia/client-account": "4.24.0", + "@algolia/client-analytics": "4.24.0", + "@algolia/client-common": "4.24.0", + "@algolia/client-personalization": "4.24.0", + "@algolia/client-search": "4.24.0", + "@algolia/logger-common": "4.24.0", + "@algolia/logger-console": "4.24.0", + "@algolia/recommend": 
"4.24.0", + "@algolia/requester-browser-xhr": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/requester-node-http": "4.24.0", + "@algolia/transporter": "4.24.0" } }, "node_modules/algoliasearch-helper": { - "version": "3.21.0", - "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.21.0.tgz", - "integrity": "sha512-hjVOrL15I3Y3K8xG0icwG1/tWE+MocqBrhW6uVBWpU+/kVEMK0BnM2xdssj6mZM61eJ4iRxHR0djEI3ENOpR8w==", + "version": "3.22.4", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.22.4.tgz", + "integrity": "sha512-fvBCywguW9f+939S6awvRMstqMF1XXcd2qs1r1aGqL/PJ1go/DqN06tWmDVmhCDqBJanm++imletrQWf0G2S1g==", "dependencies": { "@algolia/events": "^4.0.1" }, @@ -3900,6 +3978,41 @@ "algoliasearch": ">= 3.1 < 6" } }, + "node_modules/algoliasearch/node_modules/@algolia/client-common": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.24.0.tgz", + "integrity": "sha512-bc2ROsNL6w6rqpl5jj/UywlIYC21TwSSoFHKl01lYirGMW+9Eek6r02Tocg4gZ8HAw3iBvu6XQiM3BEbmEMoiA==", + "dependencies": { + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/client-search": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.24.0.tgz", + "integrity": "sha512-uRW6EpNapmLAD0mW47OXqTP8eiIx5F6qN9/x/7HHO6owL3N1IXqydGwW5nhDFBrV+ldouro2W1VX3XlcUXEFCA==", + "dependencies": { + "@algolia/client-common": "4.24.0", + "@algolia/requester-common": "4.24.0", + "@algolia/transporter": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/requester-browser-xhr": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.24.0.tgz", + "integrity": "sha512-Z2NxZMb6+nVXSjF13YpjYTdvV3032YTBSGm2vnYvYPA6mMxzM3v5rsCiSspndn9rzIW4Qp1lPHBvuoKJV6jnAA==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, + "node_modules/algoliasearch/node_modules/@algolia/requester-node-http": { + "version": "4.24.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.24.0.tgz", + "integrity": "sha512-JF18yTjNOVYvU/L3UosRcvbPMGT9B+/GQWNWnenIImglzNVGpyzChkXLnrSf6uxwVNO6ESGu6oN8MqcGQcjQJw==", + "dependencies": { + "@algolia/requester-common": "4.24.0" + } + }, "node_modules/ansi-align": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", @@ -3995,9 +4108,9 @@ } }, "node_modules/astring": { - "version": "1.8.6", - "resolved": "https://registry.npmjs.org/astring/-/astring-1.8.6.tgz", - "integrity": "sha512-ISvCdHdlTDlH5IpxQJIex7BWBywFWgjJSVdwst+/iQCoEYnyOaQ95+X1JGshuBjGp6nxKUy1jMgE3zPqN7fQdg==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz", + "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==", "bin": { "astring": "bin/astring" } @@ -4011,9 +4124,9 @@ } }, "node_modules/autoprefixer": { - "version": "10.4.19", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.19.tgz", - "integrity": "sha512-BaENR2+zBZ8xXhM4pUaKUxlVdxZ0EZhjvbopwnXmxRUfqDmwSpC2lAi/QXvx7NRdPCo1WKEcEF6mV64si1z4Ew==", + "version": "10.4.20", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz", + "integrity": 
"sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==", "funding": [ { "type": "opencollective", @@ -4029,11 +4142,11 @@ } ], "dependencies": { - "browserslist": "^4.23.0", - "caniuse-lite": "^1.0.30001599", + "browserslist": "^4.23.3", + "caniuse-lite": "^1.0.30001646", "fraction.js": "^4.3.7", "normalize-range": "^0.1.2", - "picocolors": "^1.0.0", + "picocolors": "^1.0.1", "postcss-value-parser": "^4.2.0" }, "bin": { @@ -4252,9 +4365,9 @@ } }, "node_modules/browserslist": { - "version": "4.23.0", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.0.tgz", - "integrity": "sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==", + "version": "4.23.3", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz", + "integrity": "sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==", "funding": [ { "type": "opencollective", @@ -4270,10 +4383,10 @@ } ], "dependencies": { - "caniuse-lite": "^1.0.30001587", - "electron-to-chromium": "^1.4.668", - "node-releases": "^2.0.14", - "update-browserslist-db": "^1.0.13" + "caniuse-lite": "^1.0.30001646", + "electron-to-chromium": "^1.5.4", + "node-releases": "^2.0.18", + "update-browserslist-db": "^1.1.0" }, "bin": { "browserslist": "cli.js" @@ -4378,9 +4491,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001615", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001615.tgz", - "integrity": "sha512-1IpazM5G3r38meiae0bHRnPhz+CBQ3ZLqbQMtrg+AsTPKAXgW38JNsXkyZ+v8waCsDmPq87lmfun5Q2AGysNEQ==", + "version": "1.0.30001655", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001655.tgz", + "integrity": "sha512-jRGVy3iSGO5Uutn2owlb5gR6qsGngTw9ZTb4ali9f3glshcNmJ2noam4Mo9zia5P9Dk3jNNydy7vQjuE5dQmfg==", "funding": [ { "type": "opencollective", @@ -5651,9 +5764,9 @@ "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" }, "node_modules/electron-to-chromium": { - "version": "1.4.754", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.754.tgz", - "integrity": "sha512-7Kr5jUdns5rL/M9wFFmMZAgFDuL2YOnanFH4OI4iFzUqyh3XOL7nAGbSlSMZdzKMIyyTpNSbqZsWG9odwLeKvA==" + "version": "1.5.13", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.13.tgz", + "integrity": "sha512-lbBcvtIJ4J6sS4tb5TLp1b4LyfCdMkwStzXPyAgVgTRAsep4bvrAGaBOP7ZJtQMNJpSQ9SqG4brWOroNaQtm7Q==" }, "node_modules/emoji-regex": { "version": "9.2.2", @@ -5674,9 +5787,9 @@ } }, "node_modules/emoticon": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.0.1.tgz", - "integrity": "sha512-dqx7eA9YaqyvYtUhJwT4rC1HIp82j5ybS1/vQ42ur+jBe17dJMwZE4+gvL1XadSFfxaPFFGt3Xsw+Y8akThDlw==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.1.0.tgz", + "integrity": "sha512-VWZfnxqwNcc51hIy/sbOdEem6D+cVtpPzEEtVAFdaas30+1dgkyaOQ4sQ6Bp0tOMqWO1v+HQfYaoodOkdhK6SQ==", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -5691,9 +5804,9 @@ } }, "node_modules/enhanced-resolve": { - "version": "5.16.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.16.0.tgz", - "integrity": "sha512-O+QWCviPNSSLAD9Ucn8Awv+poAkqn3T1XY5/N7kR7rQO9yfSGWkYZDwpJ+iKF7B8rxaQKWngSqACpgzeapSyoA==", + "version": "5.17.1", + "resolved": 
"https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz", + "integrity": "sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==", "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" @@ -5882,12 +5995,11 @@ } }, "node_modules/estree-util-value-to-estree": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.1.1.tgz", - "integrity": "sha512-5mvUrF2suuv5f5cGDnDphIy4/gW86z82kl5qG6mM9z04SEQI4FB5Apmaw/TGEf3l55nLtMs5s51dmhUzvAHQCA==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/estree-util-value-to-estree/-/estree-util-value-to-estree-3.1.2.tgz", + "integrity": "sha512-S0gW2+XZkmsx00tU2uJ4L9hUT7IFabbml9pHh2WQqFmAbxit++YGZne0sKJbNwkj9Wvg9E4uqWl4nCIFQMmfag==", "dependencies": { - "@types/estree": "^1.0.0", - "is-plain-obj": "^4.0.0" + "@types/estree": "^1.0.0" }, "funding": { "url": "https://github.com/sponsors/remcohaszing" @@ -6900,9 +7012,9 @@ } }, "node_modules/hast-util-raw": { - "version": "9.0.3", - "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.0.3.tgz", - "integrity": "sha512-ICWvVOF2fq4+7CMmtCPD5CM4QKjPbHpPotE6+8tDooV0ZuyJVUzHsrNX+O5NaRbieTf0F7FfeBOMAwi6Td0+yQ==", + "version": "9.0.4", + "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.0.4.tgz", + "integrity": "sha512-LHE65TD2YiNsHD3YuXcKPHXPLuYh/gjp12mOfU8jxSrm1f/yJpsb0F/KKljS6U9LJoP0Ux+tCe8iJ2AsPzTdgA==", "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", @@ -6982,9 +7094,9 @@ "integrity": "sha512-qlD8YNDqyTKTyuITrDOffsl6Tdhv+UC4hcdAVuQsK4IMQ99nSgd1MIA/Q+jQYoh9r3hVUXhYh7urSRmXPkW04g==" }, "node_modules/hast-util-to-jsx-runtime/node_modules/style-to-object": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.6.tgz", - "integrity": "sha512-khxq+Qm3xEyZfKd/y9L3oIWQimxuc4STrQKtQn8aSDRHb8mFgpukgX1hdzfrMEW6JCjyJ8p89x+IUMVnCBI1PA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.7.tgz", + "integrity": "sha512-uSjr59G5u6fbxUfKbb8GcqMGT3Xs9v5IbPkjb0S16GyOeBLAzSRK0CixBv5YrYvzO6TDLzIS6QCn78tkqWngPw==", "dependencies": { "inline-style-parser": "0.2.3" } @@ -7442,9 +7554,9 @@ } }, "node_modules/infima": { - "version": "0.2.0-alpha.43", - "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.43.tgz", - "integrity": "sha512-2uw57LvUqW0rK/SWYnd/2rRfxNA5DDNOh33jxF7fy46VWoNhGxiUQyVZHbBMjQ33mQem0cjdDVwgWVAmlRfgyQ==", + "version": "0.2.0-alpha.44", + "resolved": "https://registry.npmjs.org/infima/-/infima-0.2.0-alpha.44.tgz", + "integrity": "sha512-tuRkUSO/lB3rEhLJk25atwAjgLuzq070+pOW8XcvpHky/YbENnRRdPd85IBkyeTgttmOy5ah+yHYsK1HhUd4lQ==", "engines": { "node": ">=12" } @@ -7834,9 +7946,9 @@ } }, "node_modules/joi": { - "version": "17.13.1", - "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.1.tgz", - "integrity": "sha512-vaBlIKCyo4FCUtCm7Eu4QZd/q02bWcxfUO6YSXAZOWF6gzcLBeba8kwotUdYJjDLW8Cz8RywsSOqiNJZW0mNvg==", + "version": "17.13.3", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.3.tgz", + "integrity": "sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==", "dependencies": { "@hapi/hoek": "^9.3.0", "@hapi/topo": "^5.1.0", @@ -8232,9 +8344,9 @@ } }, "node_modules/mdast-util-gfm-autolink-literal": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.0.tgz", - "integrity": 
"sha512-FyzMsduZZHSc3i0Px3PQcBT4WJY/X/RCtEJKuybiC6sjPqLv7h1yqAkmILZtuxMSsUyaLUWNp71+vQH2zqp5cg==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", "dependencies": { "@types/mdast": "^4.0.0", "ccount": "^2.0.0", @@ -8376,9 +8488,9 @@ } }, "node_modules/mdast-util-mdx-jsx": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.2.tgz", - "integrity": "sha512-eKMQDeywY2wlHc97k5eD8VC+9ASMjN8ItEZQNGwJ6E0XWKiW/Z0V5/H8pvoXUf+y+Mj0VIgeRRbujBmFn4FTyA==", + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.3.tgz", + "integrity": "sha512-bfOjvNt+1AcbPLTFMFWY149nJz0OjmewJs3LQQ5pIyVGxP4CdOqNVJL6kTaM5c68p8q82Xv3nCyFfUnuEcH3UQ==", "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", @@ -8390,7 +8502,6 @@ "mdast-util-to-markdown": "^2.0.0", "parse-entities": "^4.0.0", "stringify-entities": "^4.0.0", - "unist-util-remove-position": "^5.0.0", "unist-util-stringify-position": "^4.0.0", "vfile-message": "^4.0.0" }, @@ -8651,9 +8762,9 @@ ] }, "node_modules/micromark-extension-directive": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-3.0.0.tgz", - "integrity": "sha512-61OI07qpQrERc+0wEysLHMvoiO3s2R56x5u7glHq2Yqq6EHbH4dW25G9GfDdGCDYqA21KE6DWgNSzxSwHc2hSg==", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-3.0.1.tgz", + "integrity": "sha512-VGV2uxUzhEZmaP7NSFo2vtq7M2nUD+WfmYQD+d8i/1nHbzE+rMy9uzTvUybBbNiVbrhOZibg3gbyoARGqgDWyg==", "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", @@ -8790,9 +8901,9 @@ } }, "node_modules/micromark-extension-gfm-autolink-literal": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.0.0.tgz", - "integrity": "sha512-rTHfnpt/Q7dEAK1Y5ii0W8bhfJlVJFnJMHIPisfPK3gpVNuOP0VnRl96+YJ3RYWV/P4gFeQoGKNlT3RhuvpqAg==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", @@ -8839,9 +8950,9 @@ ] }, "node_modules/micromark-extension-gfm-footnote": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.0.0.tgz", - "integrity": "sha512-6Rzu0CYRKDv3BfLAUnZsSlzx3ak6HAoI85KTiijuKIz5UxZxbUI+pD6oHgw+6UtQuiRwnGRhzMmPRv4smcz0fg==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", "dependencies": { "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", @@ -8911,9 +9022,9 @@ ] }, "node_modules/micromark-extension-gfm-strikethrough": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.0.0.tgz", - 
"integrity": "sha512-c3BR1ClMp5fxxmwP6AoOY2fXO9U8uFMKs4ADD66ahLTNcwzSCyRVU4k7LPV5Nxo/VJiR4TdzxRQY2v3qIUceCw==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", "dependencies": { "devlop": "^1.0.0", "micromark-util-chunked": "^2.0.0", @@ -8943,9 +9054,9 @@ ] }, "node_modules/micromark-extension-gfm-table": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.0.0.tgz", - "integrity": "sha512-PoHlhypg1ItIucOaHmKE8fbin3vTLpDOUg8KAr8gRCF1MOZI9Nquq2i/44wFvviM4WuxJzc3demT8Y3dkfvYrw==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.0.tgz", + "integrity": "sha512-Ub2ncQv+fwD70/l4ou27b4YzfNaCJOvyX4HxXU15m7mpYY+rjuWzsLIPZHJL253Z643RpbcP1oeIJlQ/SKW67g==", "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", @@ -9024,9 +9135,9 @@ } }, "node_modules/micromark-extension-gfm-task-list-item": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.0.1.tgz", - "integrity": "sha512-cY5PzGcnULaN5O7T+cOzfMoHjBW7j+T9D2sucA5d/KbsBTPcYdebm9zUd9zzdgJGCwahV+/W78Z3nbulBYVbTw==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", @@ -10202,11 +10313,11 @@ ] }, "node_modules/micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", "dependencies": { - "braces": "^3.0.2", + "braces": "^3.0.3", "picomatch": "^2.3.1" }, "engines": { @@ -10392,9 +10503,9 @@ } }, "node_modules/node-releases": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", - "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==" + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", + "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==" }, "node_modules/normalize-path": { "version": "3.0.0", @@ -10688,9 +10799,9 @@ } }, "node_modules/parse-entities/node_modules/@types/unist": { - "version": "2.0.10", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.10.tgz", - "integrity": "sha512-IfYcSBWE3hLpBg8+X2SEa8LVkJdJEkT2Ese2aaLs3ptGdVtABxndrMaxuFlQ1qdFf9Q5rDvDpxI3WwgvKFAsQA==" + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==" }, "node_modules/parse-json": { "version": "5.2.0", @@ -10815,9 +10926,9 @@ } }, 
"node_modules/picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", + "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==" }, "node_modules/picomatch": { "version": "2.3.1", @@ -12447,9 +12558,9 @@ "integrity": "sha512-PGMBq03+TTG/p/cRB7HCLKJ1MgDIi07+QU1faSjiYRfmY5UsAttV9Hs08jDAHVwcOwmVLcSJkpwyfXszVjWfIQ==" }, "node_modules/rtlcss": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-4.1.1.tgz", - "integrity": "sha512-/oVHgBtnPNcggP2aVXQjSy6N1mMAfHg4GSag0QtZBlD5bdDgAHwr4pydqJGd+SUCu9260+Pjqbjwtvu7EMH1KQ==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/rtlcss/-/rtlcss-4.3.0.tgz", + "integrity": "sha512-FI+pHEn7Wc4NqKXMXFM+VAYKEj/mRIcW4h24YVwVtyjI+EqGrLc2Hx/Ny0lrZ21cBWU2goLy36eqMcNj3AQJig==", "dependencies": { "escalade": "^3.1.1", "picocolors": "^1.0.0", @@ -12541,9 +12652,9 @@ } }, "node_modules/search-insights": { - "version": "2.14.0", - "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.14.0.tgz", - "integrity": "sha512-OLN6MsPMCghDOqlCtsIsYgtsC0pnwVTyT9Mu6A3ewOj1DxvzZF6COrn2g86E/c05xbktB0XN04m/t1Z+n+fTGw==", + "version": "2.17.0", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.17.0.tgz", + "integrity": "sha512-AskayU3QNsXQzSL6v4LTYST7NNfs2HWyHHB+sdORP9chsytAhro5XRfToAMI/LAVYgNbzowVZTMfBRodgbUHKg==", "peer": true }, "node_modules/section-matter": { @@ -13588,9 +13699,9 @@ } }, "node_modules/unified": { - "version": "11.0.4", - "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.4.tgz", - "integrity": "sha512-apMPnyLjAX+ty4OrNap7yumyVAMlKx5IWU2wlzzUdYJO9A8f1p9m/gywF/GM2ZDFcjQPrx59Mc90KwmxsoklxQ==", + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", "dependencies": { "@types/unist": "^3.0.0", "bail": "^2.0.0", @@ -13655,19 +13766,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/unist-util-remove-position": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-5.0.0.tgz", - "integrity": "sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q==", - "dependencies": { - "@types/unist": "^3.0.0", - "unist-util-visit": "^5.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, "node_modules/unist-util-stringify-position": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", @@ -13724,9 +13822,9 @@ } }, "node_modules/update-browserslist-db": { - "version": "1.0.14", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.14.tgz", - "integrity": "sha512-JixKH8GR2pWYshIPUg/NujK3JO7JiqEEUiNArE86NQyrgUuZeTlZQN3xuS/yiV5Kb48ev9K6RqNkaJjXsdg7Jw==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz", + "integrity": "sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==", "funding": [ { "type": 
"opencollective", @@ -13743,7 +13841,7 @@ ], "dependencies": { "escalade": "^3.1.2", - "picocolors": "^1.0.0" + "picocolors": "^1.0.1" }, "bin": { "update-browserslist-db": "cli.js" @@ -13976,12 +14074,11 @@ } }, "node_modules/vfile": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.1.tgz", - "integrity": "sha512-1bYqc7pt6NIADBJ98UiG0Bn/CHIVOoZ/IyEkqIruLg0mE1BKzkOXY2D6CSqQIcKqgadppE5lrxgWXJmXd7zZJw==", + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", "dependencies": { "@types/unist": "^3.0.0", - "unist-util-stringify-position": "^4.0.0", "vfile-message": "^4.0.0" }, "funding": { @@ -13990,9 +14087,9 @@ } }, "node_modules/vfile-location": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.2.tgz", - "integrity": "sha512-NXPYyxyBSH7zB5U6+3uDdd6Nybz6o6/od9rk8bp9H8GR3L+cm/fC0uUTbqBmUTnMCUDslAGBOIKNfvvb+gGlDg==", + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz", + "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==", "dependencies": { "@types/unist": "^3.0.0", "vfile": "^6.0.0" @@ -14045,20 +14142,19 @@ } }, "node_modules/webpack": { - "version": "5.91.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.91.0.tgz", - "integrity": "sha512-rzVwlLeBWHJbmgTC/8TvAcu5vpJNII+MelQpylD4jNERPwpBJOE2lEcko1zJX3QJeLjTTAnQxn/OJ8bjDzVQaw==", + "version": "5.94.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.94.0.tgz", + "integrity": "sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg==", "dependencies": { - "@types/eslint-scope": "^3.7.3", "@types/estree": "^1.0.5", "@webassemblyjs/ast": "^1.12.1", "@webassemblyjs/wasm-edit": "^1.12.1", "@webassemblyjs/wasm-parser": "^1.12.1", "acorn": "^8.7.1", - "acorn-import-assertions": "^1.9.0", + "acorn-import-attributes": "^1.9.5", "browserslist": "^4.21.10", "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.16.0", + "enhanced-resolve": "^5.17.1", "es-module-lexer": "^1.2.1", "eslint-scope": "5.1.1", "events": "^3.2.0", @@ -14231,9 +14327,9 @@ } }, "node_modules/webpack-dev-server/node_modules/ws": { - "version": "8.17.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.0.tgz", - "integrity": "sha512-uJq6108EgZMAl20KagGkzCKfMEjxmKvZHG7Tlq0Z6nOky7YF7aq4mOx6xK8TJ/i1LeK4Qus7INktacctDgY8Ow==", + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz", + "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==", "engines": { "node": ">=10.0.0" }, @@ -14475,9 +14571,9 @@ } }, "node_modules/ws": { - "version": "7.5.9", - "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.9.tgz", - "integrity": "sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q==", + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", "engines": { "node": ">=8.3.0" }, diff --git a/docsite/package.json b/docsite/package.json index e65b63f4f..94daf25ea 100644 --- a/docsite/package.json +++ b/docsite/package.json @@ -15,8 +15,8 @@ "typecheck": "tsc" }, "dependencies": { - "@docusaurus/core": "^3.4.0", - 
"@docusaurus/preset-classic": "^3.4.0", + "@docusaurus/core": "^3.5.2", + "@docusaurus/preset-classic": "^3.5.2", "@mdx-js/react": "^3.0.1", "clsx": "^2.1.1", "prism-react-renderer": "^2.3.1", @@ -24,9 +24,9 @@ "react-dom": "^18.3.1" }, "devDependencies": { - "@docusaurus/module-type-aliases": "^3.4.0", - "@docusaurus/tsconfig": "^3.4.0", - "@docusaurus/types": "^3.4.0", + "@docusaurus/module-type-aliases": "^3.5.2", + "@docusaurus/tsconfig": "^3.5.2", + "@docusaurus/types": "^3.5.2", "dotenv": "^16.4.5", "typescript": "^5.4.5" }, diff --git a/docsite/src/components/HomepageFeatures/index.tsx b/docsite/src/components/HomepageFeatures/index.tsx index e1316e2f3..fd551ff2d 100644 --- a/docsite/src/components/HomepageFeatures/index.tsx +++ b/docsite/src/components/HomepageFeatures/index.tsx @@ -9,7 +9,7 @@ type FeatureItem = { const FeatureList: FeatureItem[] = [ { - title: "automated builds 🛠️", + title: "Automated builds 🛠️", description: ( <> Build auto versioned docker images from github repos, trigger builds on @@ -18,7 +18,7 @@ const FeatureList: FeatureItem[] = [ ), }, { - title: "deploy docker containers 🚀", + title: "Deploy docker containers 🚀", description: ( <> Deploy containers, deploy docker compose, see uptime and logs across all @@ -27,7 +27,7 @@ const FeatureList: FeatureItem[] = [ ), }, { - title: "powered by Rust 🦀", + title: "Powered by Rust 🦀", description: <>The core API and periphery agent are written in Rust, }, ]; diff --git a/docsite/src/components/MonitorLogo.tsx b/docsite/src/components/KomodoLogo.tsx similarity index 75% rename from docsite/src/components/MonitorLogo.tsx rename to docsite/src/components/KomodoLogo.tsx index 5fa3e8d9c..0ddcb49b7 100644 --- a/docsite/src/components/MonitorLogo.tsx +++ b/docsite/src/components/KomodoLogo.tsx @@ -1,6 +1,6 @@ import React from "react"; -export default function MonitorLogo({ width = "4rem" }) { +export default function KomodoLogo({ width = "4rem" }) { return (
-        Monitor
+        Komodo
@@ -40,13 +40,13 @@ function HomepageHeader() { Github +
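The JSX bodies of the KomodoLogo.tsx and HomepageHeader hunks above are not fully shown here, so the snippet below is only an illustrative sketch of how the renamed export would be consumed. The component name and its default "4rem" width come from the function signature in the hunk; the import path and surrounding markup are assumptions, not content of this patch.

import React from "react";
// Hypothetical consumer of the renamed component; the "@site" alias path is an assumption.
import KomodoLogo from "@site/src/components/KomodoLogo";

export default function HomepageHeader() {
  return (
    <header>
      {/* Renders the renamed logo at its default 4rem width */}
      <KomodoLogo />
      <span>Komodo</span>
    </header>
  );
}

Renaming the file and the default export together keeps existing call sites to a one-line import change.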
diff --git a/frontend/Dockerfile b/frontend/Dockerfile index 8bbabc2ca..9f9e7a22b 100644 --- a/frontend/Dockerfile +++ b/frontend/Dockerfile @@ -5,11 +5,11 @@ WORKDIR /app COPY ./frontend ./frontend COPY ./client/core/ts ./client -ARG VITE_MONITOR_HOST -ENV VITE_MONITOR_HOST ${VITE_MONITOR_HOST} +ARG VITE_KOMODO_HOST +ENV VITE_KOMODO_HOST ${VITE_KOMODO_HOST} RUN cd client && yarn && yarn build && yarn link -RUN cd frontend && yarn link @monitor/client && yarn && yarn build +RUN cd frontend && yarn link @komodo/client && yarn && yarn build ENV PORT 4174 diff --git a/frontend/index.html b/frontend/index.html index 28cace14a..8058071a1 100644 --- a/frontend/index.html +++ b/frontend/index.html @@ -11,7 +11,7 @@ - Monitor + Komodo
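Since the Dockerfile above renames the build argument to VITE_KOMODO_HOST, note that Vite only exposes variables carrying the VITE_ prefix to browser code, through import.meta.env. The sketch below shows that mechanism only; the constant name and the fallback to the page origin are assumptions, not taken from this patch.

/// <reference types="vite/client" />
// Hypothetical sketch: read the Komodo API host baked in at image build time.
// Falling back to the current origin is an assumption, not shown in this patch.
const KOMODO_HOST: string =
  (import.meta.env.VITE_KOMODO_HOST as string | undefined) ??
  window.location.origin;

console.log(`Komodo frontend targeting ${KOMODO_HOST}`);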
diff --git a/frontend/package.json b/frontend/package.json index 548f7d6d7..e077d6b70 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -35,7 +35,7 @@ "cmdk": "1.0.0", "jotai": "2.9.2", "lightweight-charts": "4.2.0", - "lucide-react": "0.427.0", + "lucide-react": "0.437.0", "react": "18.3.1", "react-dom": "18.3.1", "react-minimal-pie-chart": "8.4.0", diff --git a/frontend/public/manifest.json b/frontend/public/manifest.json index 2a7e03f10..df2f61cf8 100644 --- a/frontend/public/manifest.json +++ b/frontend/public/manifest.json @@ -1,6 +1,6 @@ { - "short_name": "Monitor", - "name": "Monitor", + "short_name": "Komodo", + "name": "Komodo", "icons": [ { "src": "favicon.ico", diff --git a/frontend/src/components/alert/index.tsx b/frontend/src/components/alert/index.tsx index 84e79f276..3219dc2e9 100644 --- a/frontend/src/components/alert/index.tsx +++ b/frontend/src/components/alert/index.tsx @@ -1,7 +1,7 @@ import { Section } from "@components/layouts"; import { alert_level_intention } from "@lib/color"; import { useRead, atomWithStorage } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { useAtom } from "jotai"; import { AlertTriangle } from "lucide-react"; diff --git a/frontend/src/components/alert/table.tsx b/frontend/src/components/alert/table.tsx index 63180659e..bc486c674 100644 --- a/frontend/src/components/alert/table.tsx +++ b/frontend/src/components/alert/table.tsx @@ -1,4 +1,4 @@ -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { DataTable } from "@ui/data-table"; import { AlertLevel } from "."; import { AlertDetailsDialog } from "./details"; diff --git a/frontend/src/components/alert/topbar.tsx b/frontend/src/components/alert/topbar.tsx index 3c4add289..171cf77f3 100644 --- a/frontend/src/components/alert/topbar.tsx +++ b/frontend/src/components/alert/topbar.tsx @@ -17,7 +17,7 @@ import { DialogHeader, DialogTitle, } from "@ui/dialog"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { useState } from "react"; export const TopbarAlerts = () => { diff --git a/frontend/src/components/config/index.tsx b/frontend/src/components/config/index.tsx index 6580333ca..d4a9f8b33 100644 --- a/frontend/src/components/config/index.tsx +++ b/frontend/src/components/config/index.tsx @@ -5,7 +5,7 @@ import { } from "@components/config/util"; import { Section } from "@components/layouts"; import { cn } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { Card, CardHeader, CardTitle, CardContent } from "@ui/card"; import { HoverCard, HoverCardContent, HoverCardTrigger } from "@ui/hover-card"; diff --git a/frontend/src/components/config/util.tsx b/frontend/src/components/config/util.tsx index 639dfe705..682189be9 100644 --- a/frontend/src/components/config/util.tsx +++ b/frontend/src/components/config/util.tsx @@ -1,6 +1,6 @@ /* eslint-disable @typescript-eslint/no-explicit-any */ import { useRead } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Select, SelectTrigger, @@ -310,11 +310,13 @@ export const ProviderSelector = ({ - {providers?.map((provider) => ( - - {provider} - - ))} + {providers + ?.filter((provider) => provider) + .map((provider) => ( + + {provider} + + ))} {providers !== undefined && selected && !providers.includes(selected) && ( @@ 
-420,11 +422,13 @@ export const AccountSelector = ({ None - {accounts?.map((account) => ( - - {account} - - ))} + {accounts + ?.filter((account) => account) + .map((account) => ( + + {account} + + ))} ); @@ -476,11 +480,13 @@ export const AwsEcrLabelSelector = ({ None - {labels?.map((label: string) => ( - - {label} - - ))} + {labels + ?.filter((label) => label) + .map((label: string) => ( + + {label} + + ))} ); @@ -934,11 +940,13 @@ const OrganizationSelector = ({ None - {orgs?.map((org) => ( - - {org} - - ))} + {orgs + ?.filter((org) => org) + .map((org) => ( + + {org} + + ))} Custom diff --git a/frontend/src/components/export.tsx b/frontend/src/components/export.tsx index 8a96e65c6..09ed8deda 100644 --- a/frontend/src/components/export.tsx +++ b/frontend/src/components/export.tsx @@ -1,5 +1,5 @@ import { useRead } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { Dialog, diff --git a/frontend/src/components/keys/table.tsx b/frontend/src/components/keys/table.tsx index 49ecc8dd2..e611029ab 100644 --- a/frontend/src/components/keys/table.tsx +++ b/frontend/src/components/keys/table.tsx @@ -1,5 +1,5 @@ import { CopyButton } from "@components/util"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { DataTable } from "@ui/data-table"; import { Input } from "@ui/input"; import { ReactNode } from "react"; diff --git a/frontend/src/components/layouts.tsx b/frontend/src/components/layouts.tsx index 8beac7488..813084b16 100644 --- a/frontend/src/components/layouts.tsx +++ b/frontend/src/components/layouts.tsx @@ -10,7 +10,7 @@ import { DialogTitle, DialogTrigger, } from "@ui/dialog"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { ResourceComponents } from "./resources"; import { Card, CardHeader, CardTitle, CardContent, CardFooter } from "@ui/card"; import { ResourceTags } from "./tags"; diff --git a/frontend/src/components/log.tsx b/frontend/src/components/log.tsx index 09cc953fa..655c3f5b3 100644 --- a/frontend/src/components/log.tsx +++ b/frontend/src/components/log.tsx @@ -1,5 +1,5 @@ import { logToHtml } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { Select, SelectContent, SelectGroup, SelectItem, SelectTrigger, SelectValue } from "@ui/select"; import { ChevronDown } from "lucide-react"; diff --git a/frontend/src/components/resources/alerter/config/alert_types.tsx b/frontend/src/components/resources/alerter/config/alert_types.tsx index 312929103..7b2830cc2 100644 --- a/frontend/src/components/resources/alerter/config/alert_types.tsx +++ b/frontend/src/components/resources/alerter/config/alert_types.tsx @@ -1,5 +1,5 @@ import { ConfigItem } from "@components/config/util"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Badge } from "@ui/badge"; import { Select, diff --git a/frontend/src/components/resources/alerter/config/endpoint.tsx b/frontend/src/components/resources/alerter/config/endpoint.tsx index ab8f083cd..70bfd58fd 100644 --- a/frontend/src/components/resources/alerter/config/endpoint.tsx +++ b/frontend/src/components/resources/alerter/config/endpoint.tsx @@ -1,6 +1,6 @@ import { ConfigItem } from "@components/config/util"; import { TextUpdateMenu } from "@components/util"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import 
{ Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@ui/select"; const ENDPOINT_TYPES: Types.AlerterEndpoint["type"][] = ["Custom", "Slack"]; diff --git a/frontend/src/components/resources/alerter/config/index.tsx b/frontend/src/components/resources/alerter/config/index.tsx index 63adb5929..427cb0ccc 100644 --- a/frontend/src/components/resources/alerter/config/index.tsx +++ b/frontend/src/components/resources/alerter/config/index.tsx @@ -1,6 +1,6 @@ import { Config } from "@components/config"; import { useRead, useWrite } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { useState } from "react"; import { EndpointConfig } from "./endpoint"; import { AlertTypeConfig } from "./alert_types"; diff --git a/frontend/src/components/resources/alerter/config/resources.tsx b/frontend/src/components/resources/alerter/config/resources.tsx index 8d47c8794..89caf741f 100644 --- a/frontend/src/components/resources/alerter/config/resources.tsx +++ b/frontend/src/components/resources/alerter/config/resources.tsx @@ -3,7 +3,7 @@ import { ResourceComponents } from "@components/resources"; import { ResourceLink } from "@components/resources/common"; import { useRead } from "@lib/hooks"; import { resource_name } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { UsableResource } from "@types"; import { Button } from "@ui/button"; import { DataTable, SortableHeader } from "@ui/data-table"; diff --git a/frontend/src/components/resources/alerter/index.tsx b/frontend/src/components/resources/alerter/index.tsx index 2604ffbc6..26ba6c933 100644 --- a/frontend/src/components/resources/alerter/index.tsx +++ b/frontend/src/components/resources/alerter/index.tsx @@ -6,7 +6,7 @@ import { Card, CardDescription, CardHeader, CardTitle } from "@ui/card"; import { AlerterConfig } from "./config"; import { DeleteResource, NewResource } from "../common"; import { AlerterTable } from "./table"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; const useAlerter = (id?: string) => useRead("ListAlerters", {}).data?.find((d) => d.id === id); diff --git a/frontend/src/components/resources/alerter/table.tsx b/frontend/src/components/resources/alerter/table.tsx index 864c19295..4ca35844c 100644 --- a/frontend/src/components/resources/alerter/table.tsx +++ b/frontend/src/components/resources/alerter/table.tsx @@ -1,7 +1,7 @@ import { DataTable, SortableHeader } from "@ui/data-table"; import { ResourceLink } from "../common"; import { TableTags } from "@components/tags"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; export const AlerterTable = ({ alerters, diff --git a/frontend/src/components/resources/build/actions.tsx b/frontend/src/components/resources/build/actions.tsx index ea838e5da..aa7e0bbb1 100644 --- a/frontend/src/components/resources/build/actions.tsx +++ b/frontend/src/components/resources/build/actions.tsx @@ -1,6 +1,6 @@ import { ConfirmButton } from "@components/util"; import { useExecute, useRead } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Ban, Hammer, Loader2 } from "lucide-react"; import { useBuilder } from "../builder"; diff --git a/frontend/src/components/resources/build/config.tsx b/frontend/src/components/resources/build/config.tsx index 70457bdb9..e84d4bfa0 100644 --- a/frontend/src/components/resources/build/config.tsx +++ 
b/frontend/src/components/resources/build/config.tsx @@ -12,7 +12,7 @@ import { } from "@components/config/util"; import { useInvalidate, useRead, useWrite } from "@lib/hooks"; import { env_to_text } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { Textarea } from "@ui/textarea"; import { Ban, CirclePlus, PlusCircle } from "lucide-react"; @@ -265,7 +265,7 @@ export const BuildConfig = ({ { label: "Git Webhook", description: - "Configure your repo provider to send webhooks to Monitor", + "Configure your repo provider to send webhooks to Komodo", components: { ["Guard" as any]: () => { if (update.branch ?? config.branch) { diff --git a/frontend/src/components/resources/build/index.tsx b/frontend/src/components/resources/build/index.tsx index 825c764c2..5d68d375a 100644 --- a/frontend/src/components/resources/build/index.tsx +++ b/frontend/src/components/resources/build/index.tsx @@ -15,7 +15,7 @@ import { cn } from "@lib/utils"; import { useState } from "react"; import { Tabs, TabsContent, TabsList, TabsTrigger } from "@ui/tabs"; import { ResourceComponents } from ".."; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { DashboardPieChart } from "@pages/home/dashboard"; import { StatusBadge } from "@components/util"; import { HoverCard, HoverCardContent, HoverCardTrigger } from "@ui/hover-card"; diff --git a/frontend/src/components/resources/build/table.tsx b/frontend/src/components/resources/build/table.tsx index 94bfad100..f8fac6ea7 100644 --- a/frontend/src/components/resources/build/table.tsx +++ b/frontend/src/components/resources/build/table.tsx @@ -3,7 +3,7 @@ import { DataTable, SortableHeader } from "@ui/data-table"; import { fmt_version } from "@lib/formatting"; import { ResourceLink } from "../common"; import { BuildComponents } from "."; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; export const BuildTable = ({ builds }: { builds: Types.BuildListItem[] }) => { return ( diff --git a/frontend/src/components/resources/builder/config.tsx b/frontend/src/components/resources/builder/config.tsx index de40066ea..454bf7831 100644 --- a/frontend/src/components/resources/builder/config.tsx +++ b/frontend/src/components/resources/builder/config.tsx @@ -1,7 +1,7 @@ import { Config } from "@components/config"; import { InputList } from "@components/config/util"; import { useRead, useWrite } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { useState } from "react"; import { ResourceSelector } from "../common"; import { Button } from "@ui/button"; diff --git a/frontend/src/components/resources/builder/index.tsx b/frontend/src/components/resources/builder/index.tsx index 28499f90e..921e2b927 100644 --- a/frontend/src/components/resources/builder/index.tsx +++ b/frontend/src/components/resources/builder/index.tsx @@ -1,6 +1,6 @@ import { NewLayout } from "@components/layouts"; import { useRead, useWrite } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { RequiredResourceComponents } from "@types"; import { Card, CardDescription, CardHeader, CardTitle } from "@ui/card"; import { Input } from "@ui/input"; diff --git a/frontend/src/components/resources/builder/table.tsx b/frontend/src/components/resources/builder/table.tsx index aaec8c091..d69525a27 100644 --- 
a/frontend/src/components/resources/builder/table.tsx +++ b/frontend/src/components/resources/builder/table.tsx @@ -2,7 +2,7 @@ import { DataTable, SortableHeader } from "@ui/data-table"; import { ResourceLink } from "../common"; import { TableTags } from "@components/tags"; import { BuilderInstanceType } from "."; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; export const BuilderTable = ({ builders, diff --git a/frontend/src/components/resources/common.tsx b/frontend/src/components/resources/common.tsx index 7f7094603..b9192f327 100644 --- a/frontend/src/components/resources/common.tsx +++ b/frontend/src/components/resources/common.tsx @@ -32,7 +32,7 @@ import { ResourceComponents } from "."; import { Input } from "@ui/input"; import { useToast } from "@ui/use-toast"; import { NewLayout } from "@components/layouts"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { ConfigItem, DoubleInput } from "@components/config/util"; import { filterBySplit, usableResourcePath } from "@lib/utils"; @@ -122,7 +122,7 @@ export const ResourceSelector = ({ @@ -258,15 +258,17 @@ export const NewResource = ({ readable_type, server_id, build_id, + name: _name = "", }: { type: UsableResource; readable_type?: string; server_id?: string; build_id?: string; + name?: string; }) => { const nav = useNavigate(); const { mutateAsync } = useWrite(`Create${type}`); - const [name, setName] = useState(""); + const [name, setName] = useState(_name); const type_display = type === "ServerTemplate" ? "server-template" diff --git a/frontend/src/components/resources/deployment/actions.tsx b/frontend/src/components/resources/deployment/actions.tsx index 033206b8d..b1807d1f6 100644 --- a/frontend/src/components/resources/deployment/actions.tsx +++ b/frontend/src/components/resources/deployment/actions.tsx @@ -1,10 +1,10 @@ import { ActionWithDialog, ConfirmButton } from "@components/util"; -import { Play, Trash, Pause, Rocket, Pen, RefreshCcw } from "lucide-react"; +import { Play, Trash, Pause, Rocket, Pen, RefreshCcw, Square } from "lucide-react"; import { useExecute, useInvalidate, useRead, useWrite } from "@lib/hooks"; import { Input } from "@ui/input"; import { useToast } from "@ui/use-toast"; import { useEffect, useState } from "react"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Select, SelectContent, @@ -18,7 +18,7 @@ interface DeploymentId { id: string; } -export const DeployContainer = ({ id }: DeploymentId) => { +export const DeployDeployment = ({ id }: DeploymentId) => { const deployment = useRead("GetDeployment", { deployment: id }).data; const [signal, setSignal] = useState(); @@ -83,7 +83,7 @@ export const DeployContainer = ({ id }: DeploymentId) => { } }; -export const RemoveContainer = ({ id }: DeploymentId) => { +export const DestroyDeployment = ({ id }: DeploymentId) => { const deployment = useRead("GetDeployment", { deployment: id }).data; const [signal, setSignal] = useState(); @@ -92,20 +92,20 @@ export const RemoveContainer = ({ id }: DeploymentId) => { [deployment?.config?.termination_signal] ); - const { mutate, isPending } = useExecute("RemoveContainer"); + const { mutate, isPending } = useExecute("DestroyDeployment"); const deployments = useRead("ListDeployments", {}).data; const state = deployments?.find((d) => d.id === id)?.info.state; - const removing = useRead( + const destroying = useRead( "GetDeploymentActionState", { deployment: id, }, { refetchInterval: 5000 } - 
).data?.removing; + ).data?.destroying; - const pending = isPending || removing; + const pending = isPending || destroying; if (!deployment) return null; if (state === Types.DeploymentState.NotDeployed) return null; @@ -113,7 +113,7 @@ export const RemoveContainer = ({ id }: DeploymentId) => { return ( } onClick={() => mutate({ deployment: id, signal })} disabled={pending} @@ -131,11 +131,11 @@ export const RemoveContainer = ({ id }: DeploymentId) => { ); }; -export const RestartContainer = ({ id }: DeploymentId) => { +export const RestartDeployment = ({ id }: DeploymentId) => { const deployment = useDeployment(id); const state = deployment?.info.state; const { mutate: restart, isPending: restartPending } = - useExecute("RestartContainer"); + useExecute("RestartDeployment"); const action_state = useRead( "GetDeploymentActionState", { @@ -161,11 +161,11 @@ export const RestartContainer = ({ id }: DeploymentId) => { ); }; -export const StartStopContainer = ({ id }: DeploymentId) => { +export const StartStopDeployment = ({ id }: DeploymentId) => { const deployment = useDeployment(id); const state = deployment?.info.state; const { mutate: start, isPending: startPending } = - useExecute("StartContainer"); + useExecute("StartDeployment"); const action_state = useRead( "GetDeploymentActionState", { @@ -187,11 +187,11 @@ export const StartStopContainer = ({ id }: DeploymentId) => { ); } if (state === Types.DeploymentState.Running) { - return ; + return ; } }; -const StopContainer = ({ id }: DeploymentId) => { +const StopDeployment = ({ id }: DeploymentId) => { const deployment = useRead("GetDeployment", { deployment: id }).data; const [signal, setSignal] = useState(); @@ -200,7 +200,7 @@ const StopContainer = ({ id }: DeploymentId) => { [deployment?.config?.termination_signal] ); - const { mutate, isPending } = useExecute("StopContainer"); + const { mutate, isPending } = useExecute("StopDeployment"); const stopping = useRead( "GetDeploymentActionState", { @@ -216,7 +216,7 @@ const StopContainer = ({ id }: DeploymentId) => { } + icon={} onClick={() => mutate({ deployment: id, signal })} disabled={pending} loading={pending} @@ -272,13 +272,13 @@ const TermSignalSelector = ({ ); }; -export const PauseUnpauseContainer = ({ id }: DeploymentId) => { +export const PauseUnpauseDeployment = ({ id }: DeploymentId) => { const deployment = useDeployment(id); const state = deployment?.info.state; const { mutate: unpause, isPending: unpausePending } = - useExecute("UnpauseContainer"); + useExecute("UnpauseDeployment"); const { mutate: pause, isPending: pausePending } = - useExecute("PauseContainer"); + useExecute("PauseDeployment"); const action_state = useRead( "GetDeploymentActionState", { diff --git a/frontend/src/components/resources/deployment/config/components/environment.tsx b/frontend/src/components/resources/deployment/config/components/environment.tsx index 775350f56..f314758ec 100644 --- a/frontend/src/components/resources/deployment/config/components/environment.tsx +++ b/frontend/src/components/resources/deployment/config/components/environment.tsx @@ -1,6 +1,6 @@ import { ConfigItem, SecretSelector } from "@components/config/util"; import { useRead } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Textarea } from "@ui/textarea"; import { RefObject, createRef } from "react"; diff --git a/frontend/src/components/resources/deployment/config/components/image.tsx b/frontend/src/components/resources/deployment/config/components/image.tsx 
index 56027da32..2f341d17a 100644 --- a/frontend/src/components/resources/deployment/config/components/image.tsx +++ b/frontend/src/components/resources/deployment/config/components/image.tsx @@ -4,7 +4,7 @@ import { ResourceSelector } from "@components/resources/common"; import { fmt_date, fmt_version } from "@lib/formatting"; import { useRead } from "@lib/hooks"; import { filterBySplit } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { CaretSortIcon } from "@radix-ui/react-icons"; import { Command, diff --git a/frontend/src/components/resources/deployment/config/components/network.tsx b/frontend/src/components/resources/deployment/config/components/network.tsx index 64cb77294..92b0a7523 100644 --- a/frontend/src/components/resources/deployment/config/components/network.tsx +++ b/frontend/src/components/resources/deployment/config/components/network.tsx @@ -27,8 +27,8 @@ export const NetworkModeSelector = ({ { server: server_id! }, { enabled: !!server_id } ) - .data?.filter((n) => n.Name) - .map((network) => network.Name) ?? []; + .data?.filter((n) => n.name) + .map((network) => network.name) ?? []; const [customMode, setCustomMode] = useState(false); const networks = @@ -72,15 +72,17 @@ export const NetworkModeSelector = ({ - {networks?.map((network) => ( - - {network!} - - ))} + {networks + ?.filter((network) => network) + .map((network) => ( + + {network!} + + ))} Custom diff --git a/frontend/src/components/resources/deployment/config/components/ports.tsx b/frontend/src/components/resources/deployment/config/components/ports.tsx index 95006e953..6b9ce7f10 100644 --- a/frontend/src/components/resources/deployment/config/components/ports.tsx +++ b/frontend/src/components/resources/deployment/config/components/ports.tsx @@ -1,5 +1,5 @@ import { DoubleInput } from "@components/config/util"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; export const PortsConfig = ({ ports, diff --git a/frontend/src/components/resources/deployment/config/components/restart.tsx b/frontend/src/components/resources/deployment/config/components/restart.tsx index 6403d0ce3..e645469a3 100644 --- a/frontend/src/components/resources/deployment/config/components/restart.tsx +++ b/frontend/src/components/resources/deployment/config/components/restart.tsx @@ -1,5 +1,5 @@ import { ConfigItem } from "@components/config/util"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Select, SelectContent, diff --git a/frontend/src/components/resources/deployment/config/components/term-signal.tsx b/frontend/src/components/resources/deployment/config/components/term-signal.tsx index 4f3371b4c..4ac69b7b8 100644 --- a/frontend/src/components/resources/deployment/config/components/term-signal.tsx +++ b/frontend/src/components/resources/deployment/config/components/term-signal.tsx @@ -1,5 +1,5 @@ import { ConfigItem } from "@components/config/util"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { Input } from "@ui/input"; import { diff --git a/frontend/src/components/resources/deployment/config/components/volumes.tsx b/frontend/src/components/resources/deployment/config/components/volumes.tsx index 9a1491a99..8087e8ee8 100644 --- a/frontend/src/components/resources/deployment/config/components/volumes.tsx +++ b/frontend/src/components/resources/deployment/config/components/volumes.tsx @@ -1,5 +1,5 @@ import { 
DoubleInput } from "@components/config/util"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; export const VolumesConfig = ({ volumes, diff --git a/frontend/src/components/resources/deployment/config/index.tsx b/frontend/src/components/resources/deployment/config/index.tsx index 13bbd7714..36233a345 100644 --- a/frontend/src/components/resources/deployment/config/index.tsx +++ b/frontend/src/components/resources/deployment/config/index.tsx @@ -1,5 +1,5 @@ import { useRead, useWrite } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { ReactNode, useState } from "react"; import { AddExtraArgMenu, diff --git a/frontend/src/components/resources/deployment/index.tsx b/frontend/src/components/resources/deployment/index.tsx index 153c37476..59a68cce0 100644 --- a/frontend/src/components/resources/deployment/index.tsx +++ b/frontend/src/components/resources/deployment/index.tsx @@ -1,16 +1,16 @@ import { useLocalStorage, useRead } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { RequiredResourceComponents } from "@types"; import { AlertTriangle, HardDrive, Rocket, Server } from "lucide-react"; import { cn } from "@lib/utils"; import { useServer } from "../server"; import { - DeployContainer, - StartStopContainer, - RemoveContainer, + DeployDeployment, + StartStopDeployment, + DestroyDeployment, RenameDeployment, - RestartContainer, - PauseUnpauseContainer, + RestartDeployment, + PauseUnpauseDeployment, } from "./actions"; import { DeploymentLogs } from "./log"; import { @@ -35,7 +35,7 @@ export const useDeployment = (id?: string) => const ConfigOrLog = ({ id }: { id: string }) => { // const [view, setView] = useAtom(configOrLog); - const [view, setView] = useLocalStorage("deployment-tabs-v1","Config"); + const [view, setView] = useLocalStorage("deployment-tabs-v1", "Config"); const state = useDeployment(id)?.info.state; const logsDisabled = state === undefined || @@ -200,11 +200,11 @@ export const DeploymentComponents: RequiredResourceComponents = { if (!build_id) return null; return ; }, - DeployContainer, - RestartContainer, - PauseUnpauseContainer, - StartStopContainer, - RemoveContainer, + DeployDeployment, + RestartDeployment, + PauseUnpauseDeployment, + StartStopDeployment, + DestroyDeployment, }, Page: {}, diff --git a/frontend/src/components/resources/deployment/log.tsx b/frontend/src/components/resources/deployment/log.tsx index 7b522e16c..bf8c34007 100644 --- a/frontend/src/components/resources/deployment/log.tsx +++ b/frontend/src/components/resources/deployment/log.tsx @@ -1,6 +1,6 @@ import { Section } from "@components/layouts"; import { useRead } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { RefreshCw, X, AlertOctagon } from "lucide-react"; import { ReactNode, useState } from "react"; @@ -133,7 +133,7 @@ const DeploymentLogsInner = ({ const NoSearchLogs = (id: string, tail: string, stream: string) => { const { data: log, refetch } = useRead( - "GetLog", + "GetDeploymentLog", { deployment: id, tail: Number(tail) }, { refetchInterval: 30000 } ); @@ -149,7 +149,7 @@ const NoSearchLogs = (id: string, tail: string, stream: string) => { }; const SearchLogs = (id: string, terms: string[], invert: boolean) => { - const { data: log, refetch } = useRead("SearchLog", { + const { data: log, refetch } = useRead("SearchDeploymentLog", { 
deployment: id, terms, combinator: Types.SearchCombinator.And, diff --git a/frontend/src/components/resources/deployment/table.tsx b/frontend/src/components/resources/deployment/table.tsx index 88108582b..f38cf018c 100644 --- a/frontend/src/components/resources/deployment/table.tsx +++ b/frontend/src/components/resources/deployment/table.tsx @@ -1,5 +1,5 @@ import { TableTags } from "@components/tags"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { DataTable, SortableHeader } from "@ui/data-table"; import { useRead } from "@lib/hooks"; import { ResourceLink } from "../common"; diff --git a/frontend/src/components/resources/procedure/config.tsx b/frontend/src/components/resources/procedure/config.tsx index 5402bd454..5eea7ab1c 100644 --- a/frontend/src/components/resources/procedure/config.tsx +++ b/frontend/src/components/resources/procedure/config.tsx @@ -1,7 +1,7 @@ import { ConfigItem } from "@components/config/util"; import { Section } from "@components/layouts"; import { useRead, useWrite } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Card, CardHeader } from "@ui/card"; import { Input } from "@ui/input"; import { useEffect, useState } from "react"; @@ -504,13 +504,12 @@ const default_enabled_execution: () => Types.EnabledExecution = () => ({ const EXECUTION_TYPES: Types.Execution["type"][] = [ "RunBuild", "Deploy", - "RestartContainer", - "StartContainer", - "PauseContainer", - "UnpauseContainer", - "StopContainer", - "StopAllContainers", - "RemoveContainer", + "RestartDeployment", + "StartDeployment", + "PauseDeployment", + "UnpauseDeployment", + "StopDeployment", + "DestroyDeployment", "DeployStack", "StartStack", "RestartStack", @@ -523,9 +522,12 @@ const EXECUTION_TYPES: Types.Execution["type"][] = [ "BuildRepo", "RunProcedure", "RunSync", + "StopAllContainers", "PruneContainers", - "PruneImages", "PruneNetworks", + "PruneImages", + "PruneVolumes", + "PruneSystem", "Sleep", ]; @@ -592,13 +594,26 @@ type ExecutionConfigComponent< disabled: boolean; }>; -type ExecutionConfigParams = Extract< +type MinExecutionType = Exclude< + ExecutionType, + | "StartContainer" + | "RestartContainer" + | "PauseContainer" + | "UnpauseContainer" + | "StopContainer" + | "DestroyContainer" + | "DeleteNetwork" + | "DeleteImage" + | "DeleteVolume" +>; + +type ExecutionConfigParams = Extract< Types.Execution, { type: T } >["params"]; type ExecutionConfigs = { - [ExType in ExecutionType]: { + [ExType in MinExecutionType]: { Component: ExecutionConfigComponent; params: ExecutionConfigParams; }; @@ -609,107 +624,19 @@ const TARGET_COMPONENTS: ExecutionConfigs = { params: {}, Component: () => <>, }, - CloneRepo: { - params: { repo: "" }, + // Procedure + RunProcedure: { + params: { procedure: "" }, Component: ({ params, setParams, disabled }) => ( setParams({ repo })} - disabled={disabled} - /> - ), - }, - BuildRepo: { - params: { repo: "" }, - Component: ({ params, setParams, disabled }) => ( - setParams({ repo })} - disabled={disabled} - /> - ), - }, - CancelRepoBuild: { - params: { repo: "" }, - Component: ({ params, setParams, disabled }) => ( - setParams({ repo })} - disabled={disabled} - /> - ), - }, - Deploy: { - params: { deployment: "" }, - Component: ({ params, setParams, disabled }) => { - return ( - setParams({ deployment })} - disabled={disabled} - /> - ); - }, - }, - PruneContainers: { - params: { server: "" }, - Component: ({ params, setParams, disabled }) => ( - setParams({ server })} 
- disabled={disabled} - /> - ), - }, - PruneImages: { - params: { server: "" }, - Component: ({ params, setParams, disabled }) => ( - setParams({ server })} - disabled={disabled} - /> - ), - }, - PruneNetworks: { - params: { server: "" }, - Component: ({ params, setParams, disabled }) => ( - setParams({ server })} - disabled={disabled} - /> - ), - }, - PullRepo: { - params: { repo: "" }, - Component: ({ params, setParams, disabled }) => ( - setParams({ repo })} - disabled={disabled} - /> - ), - }, - RemoveContainer: { - params: { deployment: "" }, - Component: ({ params, setParams, disabled }) => ( - setParams({ deployment })} + type="Procedure" + selected={params.procedure} + onSelect={(procedure) => setParams({ procedure })} disabled={disabled} /> ), }, + // Build RunBuild: { params: { build: "" }, Component: ({ params, setParams, disabled }) => ( @@ -732,18 +659,21 @@ const TARGET_COMPONENTS: ExecutionConfigs = { /> ), }, - RunProcedure: { - params: { procedure: "" }, - Component: ({ params, setParams, disabled }) => ( - setParams({ procedure })} - disabled={disabled} - /> - ), + // Deployment + Deploy: { + params: { deployment: "" }, + Component: ({ params, setParams, disabled }) => { + return ( + setParams({ deployment })} + disabled={disabled} + /> + ); + }, }, - StartContainer: { + StartDeployment: { params: { deployment: "" }, Component: ({ params, setParams, disabled }) => ( ), }, - RestartContainer: { + RestartDeployment: { params: { deployment: "" }, Component: ({ params, setParams, disabled }) => ( ), }, - PauseContainer: { + PauseDeployment: { params: { deployment: "" }, Component: ({ params, setParams, disabled }) => ( ), }, - UnpauseContainer: { + UnpauseDeployment: { params: { deployment: "" }, Component: ({ params, setParams, disabled }) => ( ), }, - StopContainer: { + StopDeployment: { params: { deployment: "" }, Component: ({ params, setParams, disabled }) => ( ), }, - StopAllContainers: { - params: { server: "" }, + DestroyDeployment: { + params: { deployment: "" }, Component: ({ params, setParams, disabled }) => ( setParams({ server: id })} - disabled={disabled} - /> - ), - }, - RunSync: { - params: { sync: "" }, - Component: ({ params, setParams, disabled }) => ( - setParams({ sync: id })} + type="Deployment" + selected={params.deployment} + onSelect={(deployment) => setParams({ deployment })} disabled={disabled} /> ), }, + // Stack DeployStack: { params: { stack: "" }, Component: ({ params, setParams, disabled }) => ( @@ -897,6 +817,240 @@ const TARGET_COMPONENTS: ExecutionConfigs = { /> ), }, + // Repo + CloneRepo: { + params: { repo: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ repo })} + disabled={disabled} + /> + ), + }, + PullRepo: { + params: { repo: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ repo })} + disabled={disabled} + /> + ), + }, + BuildRepo: { + params: { repo: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ repo })} + disabled={disabled} + /> + ), + }, + CancelRepoBuild: { + params: { repo: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ repo })} + disabled={disabled} + /> + ), + }, + // Server + // StartContainer: { + // params: { server: "" }, + // Component: ({ params, setParams, disabled }) => ( + // setParams({ server })} + // disabled={disabled} + // /> + // ), + // }, + // RestartContainer: { + // params: { server: "" }, + // Component: ({ params, setParams, disabled }) => ( + // setParams({ server })} + // disabled={disabled} + // /> + // ), 
+ // }, + // PauseContainer: { + // params: { server: "" }, + // Component: ({ params, setParams, disabled }) => ( + // setParams({ server })} + // disabled={disabled} + // /> + // ), + // }, + // UnpauseContainer: { + // params: { server: "" }, + // Component: ({ params, setParams, disabled }) => ( + // setParams({ server })} + // disabled={disabled} + // /> + // ), + // }, + // StopContainer: { + // params: { server: "" }, + // Component: ({ params, setParams, disabled }) => ( + // setParams({ server })} + // disabled={disabled} + // /> + // ), + // }, + // DestroyContainer: { + // params: { server: "", container: "" }, + // Component: ({ params, setParams, disabled }) => ( + // setParams({ server })} + // disabled={disabled} + // /> + // ), + // }, + StartAllContainers: { + params: { server: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ server: id })} + disabled={disabled} + /> + ), + }, + RestartAllContainers: { + params: { server: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ server: id })} + disabled={disabled} + /> + ), + }, + PauseAllContainers: { + params: { server: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ server: id })} + disabled={disabled} + /> + ), + }, + UnpauseAllContainers: { + params: { server: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ server: id })} + disabled={disabled} + /> + ), + }, + StopAllContainers: { + params: { server: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ server: id })} + disabled={disabled} + /> + ), + }, + PruneContainers: { + params: { server: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ server })} + disabled={disabled} + /> + ), + }, + PruneNetworks: { + params: { server: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ server })} + disabled={disabled} + /> + ), + }, + PruneImages: { + params: { server: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ server })} + disabled={disabled} + /> + ), + }, + PruneVolumes: { + params: { server: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ server })} + disabled={disabled} + /> + ), + }, + PruneSystem: { + params: { server: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ server })} + disabled={disabled} + /> + ), + }, + RunSync: { + params: { sync: "" }, + Component: ({ params, setParams, disabled }) => ( + setParams({ sync: id })} + disabled={disabled} + /> + ), + }, + Sleep: { params: { duration_ms: 0 }, Component: ({ params, setParams, disabled }) => { diff --git a/frontend/src/components/resources/procedure/index.tsx b/frontend/src/components/resources/procedure/index.tsx index 5acb82b88..fa475b8f6 100644 --- a/frontend/src/components/resources/procedure/index.tsx +++ b/frontend/src/components/resources/procedure/index.tsx @@ -10,7 +10,7 @@ import { stroke_color_class_by_intention, } from "@lib/color"; import { cn } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { DashboardPieChart } from "@pages/home/dashboard"; const useProcedure = (id?: string) => @@ -27,7 +27,7 @@ const ProcedureIcon = ({ id, size }: { id?: string; size: number }) => { export const ProcedureComponents: RequiredResourceComponents = { list_item: (id) => useProcedure(id), - Description: () => <>Compose monitor actions together., + Description: () => <>Compose Komodo actions together., Dashboard: () => { const summary = 
useRead("GetProceduresSummary", {}).data; diff --git a/frontend/src/components/resources/procedure/table.tsx b/frontend/src/components/resources/procedure/table.tsx index 8218f62da..225a2b229 100644 --- a/frontend/src/components/resources/procedure/table.tsx +++ b/frontend/src/components/resources/procedure/table.tsx @@ -2,7 +2,7 @@ import { DataTable, SortableHeader } from "@ui/data-table"; import { TableTags } from "@components/tags"; import { ResourceLink } from "../common"; import { ProcedureComponents } from "."; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; export const ProcedureTable = ({ procedures, diff --git a/frontend/src/components/resources/repo/actions.tsx b/frontend/src/components/resources/repo/actions.tsx index ac047a4d8..ba26390f7 100644 --- a/frontend/src/components/resources/repo/actions.tsx +++ b/frontend/src/components/resources/repo/actions.tsx @@ -8,7 +8,7 @@ import { Loader2, } from "lucide-react"; import { useRepo } from "."; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { useBuilder } from "../builder"; export const CloneRepo = ({ id }: { id: string }) => { diff --git a/frontend/src/components/resources/repo/config.tsx b/frontend/src/components/resources/repo/config.tsx index ee7e30fc6..4a1a736c7 100644 --- a/frontend/src/components/resources/repo/config.tsx +++ b/frontend/src/components/resources/repo/config.tsx @@ -7,7 +7,7 @@ import { SystemCommand, } from "@components/config/util"; import { useInvalidate, useRead, useWrite } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { createRef, useState } from "react"; import { BuilderSelector, CopyGithubWebhook, ServerSelector } from "../common"; import { useToast } from "@ui/use-toast"; @@ -152,7 +152,7 @@ export const RepoConfig = ({ id }: { id: string }) => { { label: "Git Webhooks", description: - "Configure your repo provider to send webhooks to Monitor", + "Configure your repo provider to send webhooks to Komodo", components: { ["Guard" as any]: () => { if (update.branch ?? 
config.branch) { diff --git a/frontend/src/components/resources/repo/index.tsx b/frontend/src/components/resources/repo/index.tsx index 093277f6c..b61ef679d 100644 --- a/frontend/src/components/resources/repo/index.tsx +++ b/frontend/src/components/resources/repo/index.tsx @@ -20,7 +20,7 @@ import { import { cn } from "@lib/utils"; import { HoverCard, HoverCardContent, HoverCardTrigger } from "@ui/hover-card"; import { useServer } from "../server"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { DashboardPieChart } from "@pages/home/dashboard"; import { StatusBadge } from "@components/util"; import { Badge } from "@ui/badge"; diff --git a/frontend/src/components/resources/repo/table.tsx b/frontend/src/components/resources/repo/table.tsx index 77589c726..bacd4a7bf 100644 --- a/frontend/src/components/resources/repo/table.tsx +++ b/frontend/src/components/resources/repo/table.tsx @@ -2,7 +2,7 @@ import { DataTable, SortableHeader } from "@ui/data-table"; import { ResourceLink } from "../common"; import { TableTags } from "@components/tags"; import { RepoComponents } from "."; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; export const RepoTable = ({ repos }: { repos: Types.RepoListItem[] }) => { return ( diff --git a/frontend/src/components/resources/resource-sync/config.tsx b/frontend/src/components/resources/resource-sync/config.tsx index 2eff18200..9b84ce2cd 100644 --- a/frontend/src/components/resources/resource-sync/config.tsx +++ b/frontend/src/components/resources/resource-sync/config.tsx @@ -5,7 +5,7 @@ import { ProviderSelectorConfig, } from "@components/config/util"; import { useInvalidate, useRead, useWrite } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { ReactNode, useState } from "react"; import { CopyGithubWebhook } from "../common"; import { useToast } from "@ui/use-toast"; @@ -103,7 +103,7 @@ export const ResourceSyncConfig = ({ { label: "Git Webhooks", description: - "Configure your repo provider to send webhooks to Monitor", + "Configure your repo provider to send webhooks to Komodo", components: { ["Guard" as any]: () => { if (update.branch ?? 
config.branch) { diff --git a/frontend/src/components/resources/resource-sync/index.tsx b/frontend/src/components/resources/resource-sync/index.tsx index d33d6fece..87b8a24b0 100644 --- a/frontend/src/components/resources/resource-sync/index.tsx +++ b/frontend/src/components/resources/resource-sync/index.tsx @@ -4,7 +4,7 @@ import { Card } from "@ui/card"; import { Clock, FolderSync } from "lucide-react"; import { DeleteResource, NewResource } from "../common"; import { ResourceSyncTable } from "./table"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { ExecuteSync, RefreshSync } from "./actions"; import { PendingOrConfig } from "./pending-or-config"; import { diff --git a/frontend/src/components/resources/resource-sync/pending-or-config.tsx b/frontend/src/components/resources/resource-sync/pending-or-config.tsx index adf855fa9..76ebe9cb5 100644 --- a/frontend/src/components/resources/resource-sync/pending-or-config.tsx +++ b/frontend/src/components/resources/resource-sync/pending-or-config.tsx @@ -4,7 +4,7 @@ import { Tabs, TabsContent, TabsList, TabsTrigger } from "@ui/tabs"; import { useState } from "react"; import { ResourceSyncConfig } from "./config"; import { Section } from "@components/layouts"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Card, CardContent, CardHeader, CardTitle } from "@ui/card"; const PENDING_TYPE_KEYS: Array<[string, string]> = [ diff --git a/frontend/src/components/resources/resource-sync/table.tsx b/frontend/src/components/resources/resource-sync/table.tsx index 74b6c9fec..47e1e5dab 100644 --- a/frontend/src/components/resources/resource-sync/table.tsx +++ b/frontend/src/components/resources/resource-sync/table.tsx @@ -1,7 +1,7 @@ import { DataTable, SortableHeader } from "@ui/data-table"; import { ResourceLink } from "../common"; import { TableTags } from "@components/tags"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { ResourceSyncComponents } from "."; export const ResourceSyncTable = ({ diff --git a/frontend/src/components/resources/server-template/actions.tsx b/frontend/src/components/resources/server-template/actions.tsx index 9e108cb17..c30b46f95 100644 --- a/frontend/src/components/resources/server-template/actions.tsx +++ b/frontend/src/components/resources/server-template/actions.tsx @@ -15,7 +15,7 @@ import { } from "@ui/dialog"; import { useWebsocketMessages } from "@lib/socket"; import { useNavigate } from "react-router-dom"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; export const LaunchServer = ({ id }: { id: string }) => { const nav = useNavigate(); diff --git a/frontend/src/components/resources/server-template/config/aws.tsx b/frontend/src/components/resources/server-template/config/aws.tsx index f9714e3d9..3bbdab03f 100644 --- a/frontend/src/components/resources/server-template/config/aws.tsx +++ b/frontend/src/components/resources/server-template/config/aws.tsx @@ -3,7 +3,7 @@ import { InputList } from "@components/config/util"; import { TextUpdateMenu } from "@components/util"; import { useRead, useWrite } from "@lib/hooks"; import { cn } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { Card } from "@ui/card"; import { diff --git a/frontend/src/components/resources/server-template/config/hetzner.tsx b/frontend/src/components/resources/server-template/config/hetzner.tsx 
index b9b5bdd40..eeea42f94 100644 --- a/frontend/src/components/resources/server-template/config/hetzner.tsx +++ b/frontend/src/components/resources/server-template/config/hetzner.tsx @@ -3,7 +3,7 @@ import { ConfigItem, InputList } from "@components/config/util"; import { TextUpdateMenu } from "@components/util"; import { useRead, useWrite } from "@lib/hooks"; import { cn, filterBySplit } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { Card } from "@ui/card"; import { @@ -316,7 +316,8 @@ const ServerTypeSelector = ({ // The US based datacenters only have Amd servers const filter = datacenter === Types.HetznerDatacenter.HillsboroDc1 || - datacenter === Types.HetznerDatacenter.AshburnDc1 + datacenter === Types.HetznerDatacenter.AshburnDc1 || + datacenter === Types.HetznerDatacenter.SingaporeDc1 ? (st: string) => st.includes("Amd") : () => true; const server_types = Object.values(Types.HetznerServerType).filter(filter); diff --git a/frontend/src/components/resources/server-template/config/index.tsx b/frontend/src/components/resources/server-template/config/index.tsx index 3a366f444..584e9e922 100644 --- a/frontend/src/components/resources/server-template/config/index.tsx +++ b/frontend/src/components/resources/server-template/config/index.tsx @@ -1,7 +1,7 @@ import { useRead } from "@lib/hooks"; import { AwsServerTemplateConfig } from "./aws"; import { HetznerServerTemplateConfig } from "./hetzner"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; export const ServerTemplateConfig = ({ id }: { id: string }) => { const config = useRead("GetServerTemplate", { server_template: id }).data diff --git a/frontend/src/components/resources/server-template/index.tsx b/frontend/src/components/resources/server-template/index.tsx index e6b971aa5..f13855917 100644 --- a/frontend/src/components/resources/server-template/index.tsx +++ b/frontend/src/components/resources/server-template/index.tsx @@ -6,7 +6,7 @@ import { ServerTemplateConfig } from "./config"; import { Link, useNavigate } from "react-router-dom"; import { Card, CardDescription, CardHeader, CardTitle } from "@ui/card"; import { useState } from "react"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { NewLayout } from "@components/layouts"; import { Input } from "@ui/input"; import { diff --git a/frontend/src/components/resources/server-template/table.tsx b/frontend/src/components/resources/server-template/table.tsx index 5954eaac3..7569e2f36 100644 --- a/frontend/src/components/resources/server-template/table.tsx +++ b/frontend/src/components/resources/server-template/table.tsx @@ -1,7 +1,7 @@ import { DataTable, SortableHeader } from "@ui/data-table"; import { ResourceLink } from "../common"; import { TableTags } from "@components/tags"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; export const ServerTemplateTable = ({ serverTemplates, diff --git a/frontend/src/components/resources/server/actions.tsx b/frontend/src/components/resources/server/actions.tsx index 7796d6d1d..b2941367f 100644 --- a/frontend/src/components/resources/server/actions.tsx +++ b/frontend/src/components/resources/server/actions.tsx @@ -1,9 +1,12 @@ -import { ConfirmButton } from "@components/util"; -import { useInvalidate, useWrite } from "@lib/hooks"; +import { ActionWithDialog, ConfirmButton } from "@components/util"; +import { useExecute, useInvalidate, 
useRead, useWrite } from "@lib/hooks"; import { Input } from "@ui/input"; import { useToast } from "@ui/use-toast"; -import { Pen } from "lucide-react"; +import { Pen, Scissors } from "lucide-react"; import { useState } from "react"; +import { useServer } from "."; +import { has_minimum_permissions } from "@lib/utils"; +import { Types } from "@komodo/client"; export const RenameServer = ({ id }: { id: string }) => { const invalidate = useInvalidate(); @@ -40,3 +43,67 @@ export const RenameServer = ({ id }: { id: string }) => { ); }; + +export const Prune = ({ + server_id, + type, +}: { + server_id: string; + type: "Containers" | "Networks" | "Images" | "Volumes" | "System"; +}) => { + const server = useServer(server_id); + const { mutate, isPending } = useExecute(`Prune${type}`); + const action_state = useRead( + "GetServerActionState", + { server: server_id }, + { refetchInterval: 5000 } + ).data; + const perms = useRead("GetPermissionLevel", { + target: { type: "Server", id: server_id }, + }).data; + + if (!server) return; + + const canExecute = has_minimum_permissions( + perms, + Types.PermissionLevel.Execute + ); + + const pruningKey = + type === "Containers" + ? "pruning_containers" + : type === "Images" + ? "pruning_images" + : type === "Networks" + ? "pruning_networks" + : type === "Volumes" + ? "pruning_volumes" + : type === "System" + ? "pruning_system" + : ""; + + const pending = isPending || action_state?.[pruningKey]; + + if (type === "Images" || type === "Networks") { + return ( + } + onClick={() => mutate({ server: server_id })} + loading={pending} + disabled={!canExecute || pending} + /> + ); + } else { + return ( + } + onClick={() => mutate({ server: server_id })} + loading={pending} + disabled={!canExecute || pending} + /> + ); + } +}; diff --git a/frontend/src/components/resources/server/config.tsx b/frontend/src/components/resources/server/config.tsx index 717a553dc..2e71401dd 100644 --- a/frontend/src/components/resources/server/config.tsx +++ b/frontend/src/components/resources/server/config.tsx @@ -1,7 +1,7 @@ import { Config } from "@components/config"; import { InputList } from "@components/config/util"; import { useInvalidate, useRead, useWrite } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { PlusCircle } from "lucide-react"; import { ReactNode, useState } from "react"; diff --git a/frontend/src/components/resources/server/hooks.ts b/frontend/src/components/resources/server/hooks.ts index 662ca0198..76da759c1 100644 --- a/frontend/src/components/resources/server/hooks.ts +++ b/frontend/src/components/resources/server/hooks.ts @@ -1,5 +1,5 @@ import { atomWithStorage } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { useAtom } from "jotai"; const statsGranularityAtom = atomWithStorage( diff --git a/frontend/src/components/resources/server/index.tsx b/frontend/src/components/resources/server/index.tsx index d8c149cc1..de5b925c6 100644 --- a/frontend/src/components/resources/server/index.tsx +++ b/frontend/src/components/resources/server/index.tsx @@ -1,20 +1,22 @@ import { useExecute, useLocalStorage, useRead } from "@lib/hooks"; import { cn } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { RequiredResourceComponents } from "@types"; import { Server, Cpu, MemoryStick, Database, - Scissors, - XOctagon, AreaChart, Milestone, AlertTriangle, + Play, 
+ RefreshCcw, + Pause, + Square, } from "lucide-react"; import { Section } from "@components/layouts"; -import { RenameServer } from "./actions"; +import { Prune, RenameServer } from "./actions"; import { server_state_intention, stroke_color_class_by_intention, @@ -28,9 +30,10 @@ import { ActionWithDialog, ConfirmButton, StatusBadge } from "@components/util"; import { Button } from "@ui/button"; import { Tabs, TabsContent, TabsList, TabsTrigger } from "@ui/tabs"; import { RepoTable } from "../repo/table"; -import { ResourceComponents } from ".."; import { DashboardPieChart } from "@pages/home/dashboard"; import { StackTable } from "../stack/table"; +import { ResourceComponents } from ".."; +import { ServerInfo } from "./info"; export const useServer = (id?: string) => useRead("ListServers", {}, { refetchInterval: 5000 }).data?.find( @@ -51,44 +54,43 @@ const Icon = ({ id, size }: { id?: string; size: number }) => { const ConfigOrChildResources = ({ id }: { id: string }) => { const [view, setView] = useLocalStorage("server-tabs-v1", "Config"); - const deployments = useRead("ListDeployments", {}).data?.filter( - (deployment) => deployment.info.server_id === id - ); - const deploymentsDisabled = (deployments?.length || 0) === 0; - const repos = useRead("ListRepos", {}).data?.filter( - (repo) => repo.info.server_id === id - ); - const reposDisabled = (repos?.length || 0) === 0; - const stacks = useRead("ListStacks", {}).data?.filter( - (stack) => stack.info.server_id === id - ); - const stacksDisabled = (stacks?.length || 0) === 0; - const currentView = - (view === "Deployments" && deploymentsDisabled) || - (view === "Repos" && reposDisabled) - ? "Config" - : view; + + const deployments = + useRead("ListDeployments", {}).data?.filter( + (deployment) => deployment.info.server_id === id + ) ?? []; + const noDeployments = deployments.length === 0; + const repos = + useRead("ListRepos", {}).data?.filter( + (repo) => repo.info.server_id === id + ) ?? []; + const noRepos = repos.length === 0; + const stacks = + useRead("ListStacks", {}).data?.filter( + (stack) => stack.info.server_id === id + ) ?? []; + const noStacks = stacks.length === 0; + + const noResources = noDeployments && noRepos && noStacks; + + const currentView = view === "Resources" && noResources ? "Config" : view; + const tabsList = ( Config - - Deployments + + + Info + - Stacks - - - Repos + Resources ); @@ -98,30 +100,30 @@ const ConfigOrChildResources = ({ id }: { id: string }) => { - -
} - > - -
+ + - -
} - > - -
-
- - -
} - > - + +
+
} + > + +
+
} + > + +
+
} + > + +
@@ -265,27 +267,124 @@ export const ServerComponents: RequiredResourceComponents = { }, Actions: { - Prune: ({ id }) => { - const { mutate, isPending } = useExecute(`PruneImages`); - const pruning = useRead( + StartAll: ({ id }) => { + const server = useServer(id); + const { mutate, isPending } = useExecute("StartAllContainers"); + const starting = useRead( "GetServerActionState", { server: id }, { refetchInterval: 5000 } - ).data?.pruning_images; - const pending = isPending || pruning; + ).data?.starting_containers; + const dontShow = useRead("ListDockerContainers", { + server: id, + }).data?.every( + (container) => + container.state === Types.ContainerStateStatusEnum.Running + ) ?? true; + if (dontShow) { + return null; + } + const pending = isPending || starting; return ( - } - onClick={() => mutate({ server: id })} - loading={pending} - disabled={pending} - /> + server && ( + } + onClick={() => mutate({ server: id })} + loading={pending} + disabled={pending} + /> + ) + ); + }, + RestartAll: ({ id }) => { + const server = useServer(id); + const { mutate, isPending } = useExecute("RestartAllContainers"); + const restarting = useRead( + "GetServerActionState", + { server: id }, + { refetchInterval: 5000 } + ).data?.restarting_containers; + const pending = isPending || restarting; + return ( + server && ( + } + onClick={() => mutate({ server: id })} + disabled={pending} + loading={pending} + /> + ) + ); + }, + PauseAll: ({ id }) => { + const server = useServer(id); + const { mutate, isPending } = useExecute("PauseAllContainers"); + const pausing = useRead( + "GetServerActionState", + { server: id }, + { refetchInterval: 5000 } + ).data?.pausing_containers; + const dontShow = + useRead("ListDockerContainers", { + server: id, + }).data?.every( + (container) => + container.state !== Types.ContainerStateStatusEnum.Running + ) ?? true; + if (dontShow) { + return null; + } + const pending = isPending || pausing; + return ( + server && ( + } + onClick={() => mutate({ server: id })} + disabled={pending} + loading={pending} + /> + ) + ); + }, + UnpauseAll: ({ id }) => { + const server = useServer(id); + const { mutate, isPending } = useExecute("UnpauseAllContainers"); + const unpausing = useRead( + "GetServerActionState", + { server: id }, + { refetchInterval: 5000 } + ).data?.unpausing_containers; + const dontShow = + useRead("ListDockerContainers", { + server: id, + }).data?.every( + (container) => + container.state !== Types.ContainerStateStatusEnum.Paused + ) ?? 
true; + if (dontShow) { + return null; + } + const pending = isPending || unpausing; + return ( + server && ( + } + onClick={() => mutate({ server: id })} + loading={pending} + disabled={pending} + /> + ) ); }, StopAll: ({ id }) => { const server = useServer(id); - const { mutate, isPending } = useExecute(`StopAllContainers`); + const { mutate, isPending } = useExecute("StopAllContainers"); const stopping = useRead( "GetServerActionState", { server: id }, @@ -295,9 +394,9 @@ export const ServerComponents: RequiredResourceComponents = { return ( server && ( } + icon={} onClick={() => mutate({ server: id })} disabled={pending} loading={pending} @@ -305,6 +404,7 @@ export const ServerComponents: RequiredResourceComponents = { ) ); }, + PruneSystem: ({ id }) => , }, Page: {}, diff --git a/frontend/src/components/resources/server/info/containers.tsx b/frontend/src/components/resources/server/info/containers.tsx new file mode 100644 index 000000000..0f9b776a4 --- /dev/null +++ b/frontend/src/components/resources/server/info/containers.tsx @@ -0,0 +1,25 @@ +import { DockerContainersSection } from "@components/util"; +import { useRead } from "@lib/hooks"; + +export const Containers = ({ + id, + show, + setShow, +}: { + id: string; + show: boolean; + setShow: (show: boolean) => void; +}) => { + const containers = + useRead("ListDockerContainers", { server: id }, { refetchInterval: 5000 }) + .data ?? []; + return ( + + ); +}; diff --git a/frontend/src/components/resources/server/info/images.tsx b/frontend/src/components/resources/server/info/images.tsx new file mode 100644 index 000000000..14798958b --- /dev/null +++ b/frontend/src/components/resources/server/info/images.tsx @@ -0,0 +1,84 @@ +import { Section } from "@components/layouts"; +import { DockerResourceLink, ShowHideButton } from "@components/util"; +import { format_size_bytes } from "@lib/formatting"; +import { useRead } from "@lib/hooks"; +import { Badge } from "@ui/badge"; +import { DataTable, SortableHeader } from "@ui/data-table"; +import { HardDrive } from "lucide-react"; +import { Prune } from "../actions"; + +export const Images = ({ + id, + show, + setShow, +}: { + id: string; + show: boolean; + setShow: (show: boolean) => void; +}) => { + const images = + useRead("ListDockerImages", { server: id }, { refetchInterval: 5000 }) + .data ?? []; + + const allInUse = images.every((image) => image.in_use); + + return ( +
+
} + actions={ +
+ {!allInUse && } + +
+ } + > + {show && ( + ( + + ), + cell: ({ row }) => ( + Unused + ) + } + /> + ), + size: 200, + }, + { + accessorKey: "id", + header: ({ column }) => ( + + ), + }, + { + accessorKey: "size", + header: ({ column }) => ( + + ), + cell: ({ row }) => + row.original.size + ? format_size_bytes(row.original.size) + : "Unknown", + }, + ]} + /> + )} +
+
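Note on the Size column in the Images table above: it formats byte counts with format_size_bytes, which this patch adds further down in frontend/src/lib/formatting.ts. A minimal sketch of that helper with a couple of illustrative outputs (the sample byte counts are not taken from this patch):

```ts
// Sketch of format_size_bytes as added in frontend/src/lib/formatting.ts:
// sizes above 1 GB render in GB with one decimal place, everything else in MB.
const BYTES_PER_MB = 1e6;
const BYTES_PER_GB = BYTES_PER_MB * 1000;

function format_size_bytes(size_bytes: number): string {
  if (size_bytes > BYTES_PER_GB) {
    return `${(size_bytes / BYTES_PER_GB).toFixed(1)} GB`;
  } else {
    return `${(size_bytes / BYTES_PER_MB).toFixed(1)} MB`;
  }
}

// Illustrative values only.
console.log(format_size_bytes(250_000_000)); // "250.0 MB"
console.log(format_size_bytes(1_500_000_000)); // "1.5 GB"
```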
+ ); +}; diff --git a/frontend/src/components/resources/server/info/index.tsx b/frontend/src/components/resources/server/info/index.tsx new file mode 100644 index 000000000..6596416e6 --- /dev/null +++ b/frontend/src/components/resources/server/info/index.tsx @@ -0,0 +1,88 @@ +import { Section } from "@components/layouts"; +import { ReactNode } from "react"; +import { Networks } from "./networks"; +import { useServer } from ".."; +import { Types } from "@komodo/client"; +import { useLocalStorage } from "@lib/hooks"; +import { Images } from "./images"; +import { Containers } from "./containers"; +import { Volumes } from "./volumes"; +import { Button } from "@ui/button"; + +export const ServerInfo = ({ + id, + titleOther, +}: { + id: string; + titleOther: ReactNode; +}) => { + const state = useServer(id)?.info.state ?? Types.ServerState.NotOk; + const [show, setShow] = useLocalStorage<{ + containers: boolean; + networks: boolean; + images: boolean; + volumes: boolean; + }>("server-info-show-config", { + containers: true, + networks: true, + images: true, + volumes: true, + }); + + if ([Types.ServerState.NotOk, Types.ServerState.Disabled].includes(state)) { + return ( +
+

+ Server unreachable, info is not available +

+
+ ); + } + + const anyOpen = !Object.values(show).every((val) => !val); + + return ( +
+ setShow({ + containers: !anyOpen, + networks: !anyOpen, + images: !anyOpen, + volumes: !anyOpen, + }) + } + > + {anyOpen ? "Hide All" : "Show All"} + + } + > +
+ setShow({ ...show, containers })} + /> + setShow({ ...show, networks })} + /> + setShow({ ...show, volumes })} + /> + setShow({ ...show, images })} + /> +
+
+ ); +}; diff --git a/frontend/src/components/resources/server/info/networks.tsx b/frontend/src/components/resources/server/info/networks.tsx new file mode 100644 index 000000000..e0a3f4354 --- /dev/null +++ b/frontend/src/components/resources/server/info/networks.tsx @@ -0,0 +1,105 @@ +import { Section } from "@components/layouts"; +import { DockerResourceLink, ShowHideButton } from "@components/util"; +import { useRead } from "@lib/hooks"; +import { Badge } from "@ui/badge"; +import { DataTable, SortableHeader } from "@ui/data-table"; +import { Network } from "lucide-react"; +import { Prune } from "../actions"; + +export const Networks = ({ + id, + show, + setShow, +}: { + id: string; + show: boolean; + setShow: (show: boolean) => void; +}) => { + const networks = + useRead("ListDockerNetworks", { server: id }, { refetchInterval: 5000 }) + .data ?? []; + + const allInUse = networks.every((network) => + // this ignores networks that come in with no name, but they should all come in with name + !network.name + ? true + : ["none", "host", "bridge"].includes(network.name) + ? true + : network.in_use + ); + + return ( +
+
} + actions={ +
+ {!allInUse && } + +
+ } + > + {show && ( + ( + + ), + cell: ({ row }) => ( +
+ System + ) : ( + !row.original.in_use && ( + Unused + ) + ) + } + /> +
+ ), + size: 300, + }, + { + accessorKey: "driver", + header: ({ column }) => ( + + ), + }, + { + accessorKey: "scope", + header: ({ column }) => ( + + ), + }, + { + accessorKey: "attachable", + header: ({ column }) => ( + + ), + }, + { + accessorKey: "ipam_driver", + header: ({ column }) => ( + + ), + }, + ]} + /> + )} +
+
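The allInUse check for the Networks table above treats unnamed entries and the built-in none / host / bridge networks as always in use, so pruning is only offered when a user-created network is actually unused. A reduced sketch of that predicate (NetworkLike is an assumed shape keeping only the two fields the check reads):

```ts
// Built-in networks (none/host/bridge) and unnamed entries count as in use,
// so only a user-created, unused network enables the Prune action.
type NetworkLike = { name?: string; in_use: boolean }; // reduced shape (assumption)

const SYSTEM_NETWORKS = ["none", "host", "bridge"];

const networkInUse = (network: NetworkLike): boolean =>
  !network.name || SYSTEM_NETWORKS.includes(network.name) || network.in_use;

const allInUse = (networks: NetworkLike[]) => networks.every(networkInUse);

// Example: only "my-app-net" is unused here, so Prune would be shown.
console.log(
  allInUse([
    { name: "bridge", in_use: false },
    { name: "my-app-net", in_use: false },
  ])
); // false
```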
+ ); +}; diff --git a/frontend/src/components/resources/server/info/volumes.tsx b/frontend/src/components/resources/server/info/volumes.tsx new file mode 100644 index 000000000..2d2d20395 --- /dev/null +++ b/frontend/src/components/resources/server/info/volumes.tsx @@ -0,0 +1,78 @@ +import { Section } from "@components/layouts"; +import { DockerResourceLink, ShowHideButton } from "@components/util"; +import { useRead } from "@lib/hooks"; +import { Badge } from "@ui/badge"; +import { DataTable, SortableHeader } from "@ui/data-table"; +import { Database } from "lucide-react"; +import { Prune } from "../actions"; + +export const Volumes = ({ + id, + show, + setShow, +}: { + id: string; + show: boolean; + setShow: (show: boolean) => void; +}) => { + const volumes = + useRead("ListDockerVolumes", { server: id }, { refetchInterval: 5000 }) + .data ?? []; + + const allInUse = volumes.every((volume) => volume.in_use); + + return ( +
+
} + actions={ +
+ {!allInUse && } + +
+ } + > + {show && ( + ( + + ), + cell: ({ row }) => ( + Unused + ) + } + /> + ), + size: 200, + }, + { + accessorKey: "driver", + header: ({ column }) => ( + + ), + }, + { + accessorKey: "scope", + header: ({ column }) => ( + + ), + }, + ]} + /> + )} +
+
+ ); +}; diff --git a/frontend/src/components/resources/server/stat-chart.tsx b/frontend/src/components/resources/server/stat-chart.tsx index 190780259..386ad3e9f 100644 --- a/frontend/src/components/resources/server/stat-chart.tsx +++ b/frontend/src/components/resources/server/stat-chart.tsx @@ -1,7 +1,7 @@ import { hex_color_by_intention } from "@lib/color"; import { useRead } from "@lib/hooks"; import { convertTsMsToLocalUnixTsInSecs } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { ColorType, IChartApi, diff --git a/frontend/src/components/resources/server/stats.tsx b/frontend/src/components/resources/server/stats.tsx index 498cd1766..0beac254c 100644 --- a/frontend/src/components/resources/server/stats.tsx +++ b/frontend/src/components/resources/server/stats.tsx @@ -9,7 +9,7 @@ import { import { Progress } from "@ui/progress"; import { Cpu, Database, MemoryStick } from "lucide-react"; import { useRead } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { ServerComponents, useServer } from "."; import { DataTable, SortableHeader } from "@ui/data-table"; import { Fragment, useState } from "react"; diff --git a/frontend/src/components/resources/server/table.tsx b/frontend/src/components/resources/server/table.tsx index 17088d94d..90793f841 100644 --- a/frontend/src/components/resources/server/table.tsx +++ b/frontend/src/components/resources/server/table.tsx @@ -3,7 +3,7 @@ import { useRead } from "@lib/hooks"; import { DataTable, SortableHeader } from "@ui/data-table"; import { ServerComponents } from "."; import { ResourceLink } from "../common"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { useCallback } from "react"; export const ServerTable = ({ @@ -12,11 +12,17 @@ export const ServerTable = ({ servers: Types.ServerListItem[]; }) => { const deployments = useRead("ListDeployments", {}).data; - const deploymentCount = useCallback( + const stacks = useRead("ListStacks", {}).data; + const repos = useRead("ListRepos", {}).data; + const resourcesCount = useCallback( (id: string) => { - return deployments?.filter((d) => d.info.server_id === id).length || 0; + return ( + (deployments?.filter((d) => d.info.server_id === id).length || 0) + + (stacks?.filter((d) => d.info.server_id === id).length || 0) + + (repos?.filter((d) => d.info.server_id === id).length || 0) + ); }, - [deployments] + [deployments, stacks, repos] ); return ( { - const sa = deploymentCount(a.original.id); - const sb = deploymentCount(b.original.id); + const sa = resourcesCount(a.original.id); + const sb = resourcesCount(b.original.id); if (!sa && !sb) return 0; if (!sa) return -1; @@ -48,13 +54,10 @@ export const ServerTable = ({ else return 0; }, header: ({ column }) => ( - + ), cell: ({ row }) => { - const count = - deployments?.filter((d) => d.info.server_id === row.original.id) - .length ?? 
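As with the Images section, the Prune action for volumes above is only rendered when at least one list item reports in_use === false, which is the same flag behind the per-row Unused badge. A tiny sketch of that gate (InUseItem is a reduced, assumed shape):

```ts
// Prune is offered only when something in the list is actually unused.
type InUseItem = { in_use: boolean }; // reduced shape (assumption)

const showPrune = (items: InUseItem[]) => !items.every((item) => item.in_use);

console.log(showPrune([{ in_use: true }, { in_use: false }])); // true
console.log(showPrune([{ in_use: true }, { in_use: true }])); // false
```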
0; - return <>{count}; + return <>{resourcesCount(row.original.id)}; }, }, { @@ -79,4 +82,4 @@ export const ServerTable = ({ ]} /> ); -}; \ No newline at end of file +}; diff --git a/frontend/src/components/resources/stack/actions.tsx b/frontend/src/components/resources/stack/actions.tsx index 7050096c7..a671d02a5 100644 --- a/frontend/src/components/resources/stack/actions.tsx +++ b/frontend/src/components/resources/stack/actions.tsx @@ -7,10 +7,10 @@ import { RefreshCcw, Rocket, Square, - Trash2, + Trash, } from "lucide-react"; import { useStack } from "."; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { useToast } from "@ui/use-toast"; import { useState } from "react"; import { Input } from "@ui/input"; @@ -88,7 +88,7 @@ export const DestroyStack = ({ id }: { id: string }) => { } + icon={} onClick={() => destroy({ stack: id })} disabled={isPending} loading={isPending || destroying} @@ -116,12 +116,12 @@ export const RestartStack = ({ const container_state = (service && services?.find((s) => s.service === service)?.container?.state) ?? - Types.DeploymentState.Unknown; + Types.ContainerStateStatusEnum.Empty; if ( !stack || stack?.info.project_missing || - (service && container_state !== Types.DeploymentState.Running) || + (service && container_state !== Types.ContainerStateStatusEnum.Running) || state !== Types.StackState.Running ) { return null; @@ -166,7 +166,7 @@ export const StartStopStack = ({ } if ( - (service && container_state === Types.DeploymentState.Exited) || + (service && container_state === Types.ContainerStateStatusEnum.Exited) || state === Types.StackState.Stopped ) { return ( @@ -180,7 +180,7 @@ export const StartStopStack = ({ ); } if ( - (service && container_state === Types.DeploymentState.Running) || + (service && container_state === Types.ContainerStateStatusEnum.Running) || state === Types.StackState.Running ) { return ( @@ -224,7 +224,7 @@ export const PauseUnpauseStack = ({ } if ( - (service && container_state === Types.DeploymentState.Paused) || + (service && container_state === Types.ContainerStateStatusEnum.Paused) || state === Types.StackState.Paused ) { return ( @@ -238,7 +238,7 @@ export const PauseUnpauseStack = ({ ); } if ( - (service && container_state === Types.DeploymentState.Running) || + (service && container_state === Types.ContainerStateStatusEnum.Running) || state === Types.StackState.Running ) { return ( diff --git a/frontend/src/components/resources/stack/config.tsx b/frontend/src/components/resources/stack/config.tsx index 31d1e2f89..c1a6f7fab 100644 --- a/frontend/src/components/resources/stack/config.tsx +++ b/frontend/src/components/resources/stack/config.tsx @@ -8,7 +8,7 @@ import { SecretsForEnvironment, } from "@components/config/util"; import { useInvalidate, useRead, useWrite } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { createRef, ReactNode, useState } from "react"; import { CopyGithubWebhook, ServerSelector } from "../common"; import { useToast } from "@ui/use-toast"; @@ -420,7 +420,7 @@ export const StackConfig = ({ { label: "Git Webhooks", description: - "Configure your repo provider to send webhooks to Monitor", + "Configure your repo provider to send webhooks to Komodo", components: { ["Guard" as any]: () => { if (update.branch ?? 
config.branch) { diff --git a/frontend/src/components/resources/stack/index.tsx b/frontend/src/components/resources/stack/index.tsx index e6afb62ed..86fa4764d 100644 --- a/frontend/src/components/resources/stack/index.tsx +++ b/frontend/src/components/resources/stack/index.tsx @@ -19,7 +19,7 @@ import { import { cn } from "@lib/utils"; import { HoverCard, HoverCardContent, HoverCardTrigger } from "@ui/hover-card"; import { useServer } from "../server"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { DeployStack, DestroyStack, @@ -151,7 +151,12 @@ export const StackComponents: RequiredResourceComponents = { }, NoConfig: ({ id }) => { const config = useFullStack(id)?.config; - if (config?.files_on_host || config?.file_contents || config?.repo) { + if ( + !config || + config?.files_on_host || + config?.file_contents || + config?.repo + ) { return null; } return ( @@ -177,6 +182,7 @@ export const StackComponents: RequiredResourceComponents = { const info = useStack(id)?.info; const state = info?.state ?? Types.StackState.Unknown; if ( + !info || !info?.project_missing || ![Types.StackState.Down, Types.StackState.Unknown].includes(state) ) { @@ -204,7 +210,7 @@ export const StackComponents: RequiredResourceComponents = { RemoteErrors: ({ id }) => { const info = useFullStack(id)?.info; const errors = info?.remote_errors; - if (!errors || errors.length === 0) { + if (!info || !errors || errors.length === 0) { return null; } return ( diff --git a/frontend/src/components/resources/stack/info.tsx b/frontend/src/components/resources/stack/info.tsx index eceac57a3..11999b382 100644 --- a/frontend/src/components/resources/stack/info.tsx +++ b/frontend/src/components/resources/stack/info.tsx @@ -2,7 +2,7 @@ import { Section } from "@components/layouts"; import { ReactNode } from "react"; import { Card, CardHeader } from "@ui/card"; import { useFullStack, useStack } from "."; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { updateLogToHtml } from "@lib/utils"; export const StackInfo = ({ diff --git a/frontend/src/components/resources/stack/services.tsx b/frontend/src/components/resources/stack/services.tsx index 568c1a948..1d1f84cf0 100644 --- a/frontend/src/components/resources/stack/services.tsx +++ b/frontend/src/components/resources/stack/services.tsx @@ -1,13 +1,13 @@ import { Section } from "@components/layouts"; import { - deployment_state_intention, + container_state_intention, stroke_color_class_by_intention, } from "@lib/color"; import { useRead } from "@lib/hooks"; import { cn } from "@lib/utils"; import { DataTable, SortableHeader } from "@ui/data-table"; import { useStack } from "."; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { ReactNode } from "react"; import { Link } from "react-router-dom"; import { Button } from "@ui/button"; @@ -49,7 +49,7 @@ export const StackServices = ({ cell: ({ row }) => { const state = row.original.container?.state; const color = stroke_color_class_by_intention( - deployment_state_intention(state) + container_state_intention(state) ); return ( ( - + ), cell: ({ row }) => { const state = row.original.container?.state; return ( ); }, diff --git a/frontend/src/components/resources/stack/table.tsx b/frontend/src/components/resources/stack/table.tsx index 51fcb18c3..cd1bb68f1 100644 --- a/frontend/src/components/resources/stack/table.tsx +++ b/frontend/src/components/resources/stack/table.tsx @@ -3,7 +3,7 @@ import { DataTable, 
SortableHeader } from "@ui/data-table"; import { ResourceLink } from "../common"; import { TableTags } from "@components/tags"; import { StackComponents } from "."; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { useCallback } from "react"; export const StackTable = ({ stacks }: { stacks: Types.StackListItem[] }) => { diff --git a/frontend/src/components/tags/index.tsx b/frontend/src/components/tags/index.tsx index 65b7b1592..b9c120942 100644 --- a/frontend/src/components/tags/index.tsx +++ b/frontend/src/components/tags/index.tsx @@ -6,7 +6,7 @@ import { useWrite, } from "@lib/hooks"; import { cn, filterBySplit } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Badge } from "@ui/badge"; import { Button } from "@ui/button"; import { diff --git a/frontend/src/components/topbar.tsx b/frontend/src/components/topbar.tsx index acde8c0d5..8203d4407 100644 --- a/frontend/src/components/topbar.tsx +++ b/frontend/src/components/topbar.tsx @@ -54,7 +54,7 @@ export const Topbar = () => { className="flex gap-3 items-center text-2xl tracking-widest md:mx-2" > -
MONITOR
+
KOMODO
{/* Searchbar */} @@ -86,7 +86,7 @@ const Version = () => { if (!version) return null; return ( diff --git a/frontend/src/components/updates/resource.tsx b/frontend/src/components/updates/resource.tsx index a680ca406..61108746b 100644 --- a/frontend/src/components/updates/resource.tsx +++ b/frontend/src/components/updates/resource.tsx @@ -10,10 +10,10 @@ import { Milestone, } from "lucide-react"; import { Link } from "react-router-dom"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Section } from "@components/layouts"; import { UpdateDetails, UpdateUser } from "./details"; -import { UpdateStatus } from "@monitor/client/dist/types"; +import { UpdateStatus } from "@komodo/client/dist/types"; import { fmt_date, fmt_operation, fmt_version } from "@lib/formatting"; import { getUpdateQuery, diff --git a/frontend/src/components/updates/table.tsx b/frontend/src/components/updates/table.tsx index 364e011fc..e71f13198 100644 --- a/frontend/src/components/updates/table.tsx +++ b/frontend/src/components/updates/table.tsx @@ -1,5 +1,5 @@ import { fmt_date_with_minutes, fmt_operation } from "@lib/formatting"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { DataTable } from "@ui/data-table"; import { useState } from "react"; import { UpdateDetailsInner, UpdateUser } from "./details"; diff --git a/frontend/src/components/updates/topbar.tsx b/frontend/src/components/updates/topbar.tsx index 9c151a51c..3036c58f2 100644 --- a/frontend/src/components/updates/topbar.tsx +++ b/frontend/src/components/updates/topbar.tsx @@ -11,7 +11,7 @@ import { Calendar } from "lucide-react"; import { UpdateDetails, UpdateUser } from "./details"; import { ResourceComponents } from "@components/resources"; import { cn, version_is_none } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { fmt_date, fmt_operation, fmt_version } from "@lib/formatting"; import { ResourceName } from "@components/resources/common"; import { UsableResource } from "@types"; diff --git a/frontend/src/components/users/hooks.ts b/frontend/src/components/users/hooks.ts index 8e99fc734..5dc2e6205 100644 --- a/frontend/src/components/users/hooks.ts +++ b/frontend/src/components/users/hooks.ts @@ -1,5 +1,5 @@ import { useRead } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { UsableResource } from "@types"; export const useUserTargetPermissions = (user_target: Types.UserTarget) => { diff --git a/frontend/src/components/users/permissions-table.tsx b/frontend/src/components/users/permissions-table.tsx index e0d77cb42..bc965570e 100644 --- a/frontend/src/components/users/permissions-table.tsx +++ b/frontend/src/components/users/permissions-table.tsx @@ -1,5 +1,5 @@ import { useInvalidate, useWrite } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { UsableResource } from "@types"; import { useToast } from "@ui/use-toast"; import { useState } from "react"; diff --git a/frontend/src/components/users/resource-type-permissions.tsx b/frontend/src/components/users/resource-type-permissions.tsx index 270b9d0e4..f96b67d7c 100644 --- a/frontend/src/components/users/resource-type-permissions.tsx +++ b/frontend/src/components/users/resource-type-permissions.tsx @@ -2,7 +2,7 @@ import { PermissionLevelSelector } from "@components/config/util"; import { Section } from "@components/layouts"; import { 
useInvalidate, useRead, useWrite } from "@lib/hooks"; import { RESOURCE_TARGETS } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { useToast } from "@ui/use-toast"; export const UserTargetPermissionsOnResourceTypes = ({ diff --git a/frontend/src/components/users/table.tsx b/frontend/src/components/users/table.tsx index bca31b08f..72f4a0d53 100644 --- a/frontend/src/components/users/table.tsx +++ b/frontend/src/components/users/table.tsx @@ -1,5 +1,5 @@ import { text_color_class_by_intention } from "@lib/color"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { DataTable } from "@ui/data-table"; import { useNavigate } from "react-router-dom"; import { ColumnDef } from "@tanstack/react-table"; diff --git a/frontend/src/components/util.tsx b/frontend/src/components/util.tsx index b313ba2ef..7ff18ce90 100644 --- a/frontend/src/components/util.tsx +++ b/frontend/src/components/util.tsx @@ -8,12 +8,19 @@ import { } from "react"; import { Button } from "../ui/button"; import { + Box, Check, CheckCircle, + ChevronDown, + ChevronUp, Copy, + Database, + HardDrive, Loader2, LogOut, + Network, Settings, + Tags, User, } from "lucide-react"; import { Input } from "../ui/input"; @@ -34,10 +41,17 @@ import { Card } from "@ui/card"; import { snake_case_to_upper_space_case } from "@lib/formatting"; import { ColorIntention, + container_state_intention, hex_color_by_intention, + stroke_color_class_by_intention, text_color_class_by_intention, } from "@lib/color"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; +import { Badge } from "@ui/badge"; +import { Section } from "./layouts"; +import { DataTable, SortableHeader } from "@ui/data-table"; +import { useRead } from "@lib/hooks"; +import { Prune } from "./resources/server/actions"; export const WithLoading = ({ children, @@ -96,10 +110,10 @@ export const ActionButton = forwardRef< + ); +}; + +type DockerResourceType = "container" | "network" | "image" | "volume"; + +export const DOCKER_LINK_ICONS: { + [type in DockerResourceType]: React.FC<{ + server_id: string; + name: string | undefined; + size?: number; + }>; +} = { + container: ({ server_id, name, size = 4 }) => { + const state = + useRead("ListDockerContainers", { server: server_id }).data?.find( + (container) => container.name === name + )?.state ?? Types.ContainerStateStatusEnum.Empty; + return ( + + ); + }, + network: ({ server_id, name, size = 4 }) => { + const containers = + useRead("ListDockerContainers", { server: server_id }).data ?? []; + const no_containers = !name + ? false + : containers.every((container) => !container.networks?.includes(name)); + return ( + + ); + }, + image: ({ server_id, name, size = 4 }) => { + const containers = + useRead("ListDockerContainers", { server: server_id }).data ?? []; + const no_containers = !name + ? false + : containers.every((container) => container.image_id !== name); + return ( + + ); + }, + volume: ({ server_id, name, size = 4 }) => { + const containers = + useRead("ListDockerContainers", { server: server_id }).data ?? []; + const no_containers = !name + ? 
false + : containers.every((container) => !container.volumes?.includes(name)); + return ( + + ); + }, +}; + +export const DockerResourceLink = ({ + server_id, + name, + id, + type, + extra, + muted, +}: { + server_id: string; + name: string | undefined; + id?: string; + type: "container" | "network" | "image" | "volume"; + extra?: ReactNode; + muted?: boolean; +}) => { + if (!name) return "Unknown"; + + const Icon = DOCKER_LINK_ICONS[type]; + + return ( + + + + ); +}; + +export const DockerResourcePageName = ({ name: _name }: { name?: string }) => { + const name = _name ?? "Unknown"; + return ( +

+ {name} +

+ ); +}; + +export const DockerContainersSection = ({ + server_id, + containers, + show = true, + setShow, + pruneButton, +}: { + server_id: string; + containers: Types.ListDockerContainersResponse; + show?: boolean; + setShow?: (show: boolean) => void; + pruneButton?: boolean; +}) => { + const allRunning = useRead("ListDockerContainers", { + server: server_id, + }).data?.every( + (container) => container.state === Types.ContainerStateStatusEnum.Running + ); + return ( +
+
} + actions={ +
+ {pruneButton && !allRunning && ( + + )} + {setShow && } +
+ } + > + {show && ( + ( + + ), + cell: ({ row }) => ( + + ), + size: 200, + }, + { + accessorKey: "image", + header: ({ column }) => ( + + ), + cell: ({ row }) => ( + + ), + }, + { + accessorKey: "network_mode", + header: ({ column }) => ( + + ), + cell: ({ row }) => ( + + ), + }, + { + accessorKey: "state", + header: ({ column }) => ( + + ), + cell: ({ row }) => { + const state = row.original?.state; + return ( + + ); + }, + }, + ]} + /> + )} +
+
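The state badges in the containers table above derive their color intention from container_state_intention, added by this patch in frontend/src/lib/color.ts (see the diff just below). A small sketch spelling out the expected mapping, written as assertions against that helper:

```ts
// Expected ColorIntention per container state, matching the
// container_state_intention helper added in frontend/src/lib/color.ts.
import { Types } from "@komodo/client";
import { container_state_intention } from "@lib/color";

const cases: [Types.ContainerStateStatusEnum | undefined, string][] = [
  [Types.ContainerStateStatusEnum.Running, "Good"],
  [Types.ContainerStateStatusEnum.Paused, "Warning"],
  [Types.ContainerStateStatusEnum.Empty, "Unknown"],
  [Types.ContainerStateStatusEnum.Exited, "Critical"], // any other state falls through to Critical
  [undefined, "None"],
];

for (const [state, expected] of cases) {
  console.assert(container_state_intention(state) === expected, `${state}`);
}
```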
+ ); +}; diff --git a/frontend/src/globals.css b/frontend/src/globals.css index e00fb65a6..a306adee6 100644 --- a/frontend/src/globals.css +++ b/frontend/src/globals.css @@ -1,5 +1,3 @@ -@import url("https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600&display=swap"); - @tailwind base; @tailwind components; @tailwind utilities; @@ -20,12 +18,19 @@ body { @apply bg-background text-foreground; - font-family: Inter; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", + "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", + sans-serif; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; overflow-y: hidden; } pre { @apply bg-card text-card-foreground border rounded-xl min-h-full text-xs p-4 whitespace-pre-wrap scroll-m-4 break-all; } + code { + font-family: source-code-pro, Menlo, Monaco, Consolas, "Courier New", monospace; + } } @layer base { diff --git a/frontend/src/lib/color.ts b/frontend/src/lib/color.ts index 50514cbba..554e371e2 100644 --- a/frontend/src/lib/color.ts +++ b/frontend/src/lib/color.ts @@ -1,4 +1,4 @@ -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; export type ColorIntention = | "Good" @@ -127,6 +127,23 @@ export const deployment_state_intention: ( } }; +export const container_state_intention: ( + state?: Types.ContainerStateStatusEnum +) => ColorIntention = (state) => { + switch (state) { + case undefined: + return "None"; + case Types.ContainerStateStatusEnum.Running: + return "Good"; + case Types.ContainerStateStatusEnum.Paused: + return "Warning"; + case Types.ContainerStateStatusEnum.Empty: + return "Unknown"; + default: + return "Critical"; + } +}; + export const build_state_intention = (status?: Types.BuildState) => { switch (status) { case undefined: diff --git a/frontend/src/lib/formatting.ts b/frontend/src/lib/formatting.ts index 48f5f2d6c..780d1824b 100644 --- a/frontend/src/lib/formatting.ts +++ b/frontend/src/lib/formatting.ts @@ -1,4 +1,4 @@ -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; export const fmt_date = (d: Date) => { const hours = d.getHours(); @@ -73,3 +73,14 @@ export function snake_case_to_upper_space_case(snake: string) { .map((item) => item[0].toUpperCase() + item.slice(1)) .join(" "); } + +const BYTES_PER_MB = 1e6; +const BYTES_PER_GB = BYTES_PER_MB * 1000; + +export function format_size_bytes(size_bytes: number) { + if (size_bytes > BYTES_PER_GB) { + return `${(size_bytes / BYTES_PER_GB).toFixed(1)} GB`; + } else { + return `${(size_bytes / BYTES_PER_MB).toFixed(1)} MB`; + } +} diff --git a/frontend/src/lib/hooks.ts b/frontend/src/lib/hooks.ts index 9aaf3f12b..065b799ca 100644 --- a/frontend/src/lib/hooks.ts +++ b/frontend/src/lib/hooks.ts @@ -1,12 +1,12 @@ -import { AUTH_TOKEN_STORAGE_KEY, MONITOR_BASE_URL } from "@main"; -import { MonitorClient as Client, Types } from "@monitor/client"; +import { AUTH_TOKEN_STORAGE_KEY, KOMODO_BASE_URL } from "@main"; +import { KomodoClient as Client, Types } from "@komodo/client"; import { AuthResponses, ExecuteResponses, ReadResponses, UserResponses, WriteResponses, -} from "@monitor/client/dist/responses"; +} from "@komodo/client/dist/responses"; import { UseMutationOptions, UseQueryOptions, @@ -25,7 +25,7 @@ import { useParams } from "react-router-dom"; const token = () => ({ jwt: localStorage.getItem(AUTH_TOKEN_STORAGE_KEY) ?? 
"", }); -const client = () => Client(MONITOR_BASE_URL, { type: "jwt", params: token() }); +const client = () => Client(KOMODO_BASE_URL, { type: "jwt", params: token() }); export const useLoginOptions = () => useQuery({ @@ -339,7 +339,7 @@ export const useShiftKeyListener = (listenKey: string, onPress: () => void) => { }); }; -// Returns true if monitor has no resources. +// Returns true if Komodo has no resources. export const useNoResources = () => { const servers = useRead("ListServers", {}, { refetchInterval: 5000 }).data?.length ?? 0; diff --git a/frontend/src/lib/socket.tsx b/frontend/src/lib/socket.tsx index 42e421593..ea001a4eb 100644 --- a/frontend/src/lib/socket.tsx +++ b/frontend/src/lib/socket.tsx @@ -1,5 +1,5 @@ import { useInvalidate, useUser } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { toast } from "@ui/use-toast"; import { atom, useAtom } from "jotai"; @@ -78,10 +78,40 @@ const on_message = ( invalidate(["GetResourceSyncActionState", { sync: update.target.id }]); } + // Invalidate lists for execution updates - update status + if (update.operation === Types.Operation.RunBuild) { + invalidate(["ListBuilds"]); + } else if ( + [ + Types.Operation.CloneRepo, + Types.Operation.PullRepo, + Types.Operation.BuildRepo, + ].includes(update.operation) + ) { + invalidate(["ListRepos"]); + } else if (update.operation === Types.Operation.RunProcedure) { + invalidate(["ListProcedures"]); + } + // Do invalidations of these only if update is completed if (update.status === Types.UpdateStatus.Complete) { invalidate(["ListAlerts"]); + // Invalidate docker infos + if (["Server", "Deployment", "Stack"].includes(update.target.type)) { + invalidate( + ["ListDockerContainers"], + ["InspectDockerContainer"], + ["ListDockerNetworks"], + ["InspectDockerNetwork"], + ["ListDockerImages"], + ["InspectDockerImage"], + ["ListDockerVolumes"], + ["InspectDockerVolume"], + ["GetResourceMatchingContainer"] + ); + } + if (update.target.type === "Deployment") { invalidate( ["ListDeployments"], @@ -90,8 +120,10 @@ const on_message = ( ["ListDockerNetworks"], ["ListDockerImages"], ["GetDeployment", { deployment: update.target.id }], - ["GetLog", { deployment: update.target.id }], - ["GetDeploymentContainer", { deployment: update.target.id }] + ["GetDeploymentLog", { deployment: update.target.id }], + ["SearchDeploymentLog", { deployment: update.target.id }], + ["GetDeploymentContainer", { deployment: update.target.id }], + ["GetResourceMatchingContainer"] ); } @@ -100,15 +132,16 @@ const on_message = ( ["ListStacks"], ["ListFullStacks"], ["GetStacksSummary"], - ["GetStackServiceLog"], - ["SearchStackServiceLog"], ["ListCommonStackExtraArgs"], ["ListComposeProjects"], ["ListDockerContainers"], ["ListDockerNetworks"], ["ListDockerImages"], + ["GetStackServiceLog", { stack: update.target.id }], + ["SearchStackServiceLog", { stack: update.target.id }], ["GetStack", { stack: update.target.id }], - ["ListStackServices", { stack: update.target.id }] + ["ListStackServices", { stack: update.target.id }], + ["GetResourceMatchingContainer"] ); } @@ -289,7 +322,6 @@ const make_websocket = ({ on_message: (e: MessageEvent) => void; on_close: () => void; }) => { - console.log("init websocket"); const ws = new WebSocket(url); const _on_open = () => { diff --git a/frontend/src/lib/utils.ts b/frontend/src/lib/utils.ts index de1613a81..16683763b 100644 --- a/frontend/src/lib/utils.ts +++ b/frontend/src/lib/utils.ts @@ -1,5 +1,5 @@ 
import { ResourceComponents } from "@components/resources"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { UsableResource } from "@types"; import Convert from "ansi-to-html"; import { type ClassValue, clsx } from "clsx"; diff --git a/frontend/src/main.tsx b/frontend/src/main.tsx index f105fed35..bf9775b6c 100644 --- a/frontend/src/main.tsx +++ b/frontend/src/main.tsx @@ -7,13 +7,13 @@ import { WebsocketProvider } from "@lib/socket"; import { Toaster } from "@ui/toaster"; import { atomWithStorage } from "@lib/hooks"; -export const AUTH_TOKEN_STORAGE_KEY = "monitor-auth-token"; +export const AUTH_TOKEN_STORAGE_KEY = "komodo-auth-token"; -export const MONITOR_BASE_URL = - import.meta.env.VITE_MONITOR_HOST ?? location.origin; +export const KOMODO_BASE_URL = + import.meta.env.VITE_KOMODO_HOST ?? location.origin; export const UPDATE_WS_URL = - MONITOR_BASE_URL.replace("http", "ws") + "/ws/update"; + KOMODO_BASE_URL.replace("http", "ws") + "/ws/update"; const query_client = new QueryClient({ defaultOptions: { queries: { retry: false } }, diff --git a/frontend/src/pages/alerts.tsx b/frontend/src/pages/alerts.tsx index fbc44aa6e..f484025cc 100644 --- a/frontend/src/pages/alerts.tsx +++ b/frontend/src/pages/alerts.tsx @@ -1,7 +1,7 @@ import { AlertsTable } from "@components/alert/table"; import { Page } from "@components/layouts"; import { useRead, useResourceParamType } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { Label } from "@ui/label"; import { diff --git a/frontend/src/pages/home/all_resources.tsx b/frontend/src/pages/home/all_resources.tsx index cc05fda5a..fe80eb8f2 100644 --- a/frontend/src/pages/home/all_resources.tsx +++ b/frontend/src/pages/home/all_resources.tsx @@ -2,6 +2,7 @@ import { ExportButton } from "@components/export"; import { Page, Section } from "@components/layouts"; import { ResourceComponents } from "@components/resources"; import { TagsFilter } from "@components/tags"; +import { ShowHideButton } from "@components/util"; import { useFilterResources, useNoResources, @@ -10,11 +11,10 @@ import { useUser, } from "@lib/hooks"; import { cn } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { RequiredResourceComponents, UsableResource } from "@types"; -import { Button } from "@ui/button"; import { Input } from "@ui/input"; -import { AlertTriangle, ChevronDown, ChevronUp } from "lucide-react"; +import { AlertTriangle } from "lucide-react"; import { useState } from "react"; export const AllResources = () => { @@ -32,7 +32,7 @@ export const AllResources = () => { placeholder="search..." className="w-[200px] lg:w-[300px]" /> - +
@@ -90,21 +90,7 @@ const TableSection = ({ key={type} title={type + "s"} icon={} - actions={ - - } + actions={} >
{show && } diff --git a/frontend/src/pages/home/dashboard.tsx b/frontend/src/pages/home/dashboard.tsx index 13dcee5ae..8e49cf52e 100644 --- a/frontend/src/pages/home/dashboard.tsx +++ b/frontend/src/pages/home/dashboard.tsx @@ -1,17 +1,23 @@ import { ExportButton } from "@components/export"; import { Page, Section } from "@components/layouts"; import { ResourceComponents } from "@components/resources"; -import { ResourceName } from "@components/resources/common"; +import { ResourceLink, ResourceName } from "@components/resources/common"; import { TagsWithBadge } from "@components/tags"; +import { StatusBadge } from "@components/util"; import { + build_state_intention, ColorIntention, hex_color_by_intention, + procedure_state_intention, + repo_state_intention, text_color_class_by_intention, } from "@lib/color"; import { useNoResources, useRead, useUser } from "@lib/hooks"; import { cn, usableResourcePath } from "@lib/utils"; +import { Types } from "@komodo/client"; import { UsableResource } from "@types"; -import { AlertTriangle, Boxes, History } from "lucide-react"; +import { DataTable, SortableHeader } from "@ui/data-table"; +import { AlertTriangle, Boxes, Circle, History } from "lucide-react"; import { PieChart } from "react-minimal-pie-chart"; import { Link } from "react-router-dom"; @@ -20,6 +26,7 @@ export const Dashboard = () => { const user = useUser().data!; return ( +
} @@ -181,3 +188,92 @@ export const DashboardPieChart = ({
); }; + +const ActiveResources = () => { + const builds = + useRead("ListBuilds", {}).data?.filter( + (build) => build.info.state === Types.BuildState.Building + ) ?? []; + const repos = + useRead("ListRepos", {}).data?.filter((repo) => + [ + Types.RepoState.Building, + Types.RepoState.Cloning, + Types.RepoState.Pulling, + ].includes(repo.info.state) + ) ?? []; + const procedures = + useRead("ListProcedures", {}).data?.filter( + (procedure) => procedure.info.state === Types.ProcedureState.Running + ) ?? []; + + const resources = [ + ...(builds ?? []).map((build) => ({ + type: "Build" as UsableResource, + id: build.id, + state: ( + + ), + })), + ...(repos ?? []).map((repo) => ({ + type: "Repo" as UsableResource, + id: repo.id, + state: ( + + ), + })), + ...(procedures ?? []).map((procedure) => ({ + type: "Procedure" as UsableResource, + id: procedure.id, + state: ( + + ), + })), + ]; + + if (resources.length === 0) return null; + + return ( +
+ } + > + ( + + ), + cell: ({ row }) => ( + + ), + }, + { + accessorKey: "type", + header: ({ column }) => ( + + ), + }, + { + header: "State", + cell: ({ row }) => row.original.state, + }, + ]} + /> +
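For reference, the ActiveResources section above counts a resource as active based on the list-item states used in its filters:

```ts
// States treated as "active" on the dashboard, per the filters above.
import { Types } from "@komodo/client";

const ACTIVE_BUILD_STATES = [Types.BuildState.Building];
const ACTIVE_REPO_STATES = [
  Types.RepoState.Building,
  Types.RepoState.Cloning,
  Types.RepoState.Pulling,
];
const ACTIVE_PROCEDURE_STATES = [Types.ProcedureState.Running];

// e.g. a repo that is mid-clone is listed; an idle one is not.
console.log(ACTIVE_REPO_STATES.includes(Types.RepoState.Cloning)); // true
```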
+ ); +}; diff --git a/frontend/src/pages/login.tsx b/frontend/src/pages/login.tsx index bc8f2e1e4..4ab528870 100644 --- a/frontend/src/pages/login.tsx +++ b/frontend/src/pages/login.tsx @@ -12,7 +12,7 @@ import { Label } from "@ui/label"; import { useAuth, useLoginOptions, useUserInvalidate } from "@lib/hooks"; import { useState } from "react"; import { ThemeToggle } from "@ui/theme"; -import { AUTH_TOKEN_STORAGE_KEY, MONITOR_BASE_URL } from "@main"; +import { AUTH_TOKEN_STORAGE_KEY, KOMODO_BASE_URL } from "@main"; import { Loader2, X } from "lucide-react"; import { cn } from "@lib/utils"; @@ -21,7 +21,7 @@ type OauthProvider = "Github" | "Google"; const login_with_oauth = (provider: OauthProvider) => { const redirect = encodeURIComponent(location.href); location.replace( - `${MONITOR_BASE_URL}/auth/${provider.toLowerCase()}/login?redirect=${redirect}` + `${KOMODO_BASE_URL}/auth/${provider.toLowerCase()}/login?redirect=${redirect}` ); }; @@ -44,10 +44,6 @@ const useExchangeToken = () => { localStorage.setItem(AUTH_TOKEN_STORAGE_KEY, jwt); sanitize_query(search); }, - onError: (e) => { - console.log("exchange token for jwt error:", e); - sanitize_query(search); - }, }); // In this case, failed to get user (jwt unset / invalid) @@ -101,7 +97,8 @@ export const Login = () => { // Otherwise just standard login return (
-
+
+
{
- Monitor + Komodo Log In
@@ -197,7 +194,7 @@ export const Login = () => { No login methods have been configured. See the + + +
+ + {/* TITLE */} +
+
+ +
+ +
+ + {status && ( +

{status}

+ )} +
+
+ + {/* INFO */} +
+ + +
+
+ + {/* Actions */} + {canExecute && ( +
}> +
+ {Object.entries(Actions).map(([key, Action]) => ( + + ))} +
+
+ )} + + {/* Updates */} + + + + + {/* TOP LEVEL CONTAINER INFO */} +
}> + +
+ + +
+ ); +}; + +const AttachedResource = ({ + id, + container, +}: { + id: string; + container: string; +}) => { + const { data: attached, isPending } = useRead( + "GetResourceMatchingContainer", + { server: id, container }, + { refetchInterval: 30_000 } + ); + + if (isPending) { + return ; + } + + if (!attached || !attached.resource) { + return null; + } + + return ( + <> + | +
+
{attached.resource.type}:
+ +
+ + ); +}; + +const NewDeployment = ({ + id, + container, +}: { + id: string; + container: string; +}) => { + const { data: attached, isPending } = useRead( + "GetResourceMatchingContainer", + { server: id, container } + ); + + if (isPending) { + return ; + } + + if (!attached) { + return null; + } + + if (!attached?.resource) { + return ; + } +}; diff --git a/frontend/src/pages/server-info/container/log.tsx b/frontend/src/pages/server-info/container/log.tsx new file mode 100644 index 000000000..f641042a0 --- /dev/null +++ b/frontend/src/pages/server-info/container/log.tsx @@ -0,0 +1,162 @@ +import { Section } from "@components/layouts"; +import { Log, TailLengthSelector } from "@components/log"; +import { useRead } from "@lib/hooks"; +import { Types } from "@komodo/client"; +import { Button } from "@ui/button"; +import { Input } from "@ui/input"; +import { Switch } from "@ui/switch"; +import { ToggleGroup, ToggleGroupItem } from "@ui/toggle-group"; +import { useToast } from "@ui/use-toast"; +import { AlertOctagon, RefreshCw, ScrollText, X } from "lucide-react"; +import { useState } from "react"; + +export const ContainerLogs = ({ + id, + container_name, +}: { + /// Server id + id: string; + container_name: string; +}) => { + const { toast } = useToast(); + const [stream, setStream] = useState("stdout"); + const [tail, set] = useState("100"); + const [terms, setTerms] = useState([]); + const [invert, setInvert] = useState(false); + const [search, setSearch] = useState(""); + + const addTerm = () => { + if (!search.length) return; + if (terms.includes(search)) { + toast({ title: "Search term is already present" }); + setSearch(""); + return; + } + setTerms([...terms, search]); + setSearch(""); + }; + + const clearSearch = () => { + setSearch(""); + setTerms([]); + }; + + const { Log, refetch, stderr } = terms.length + ? SearchLogs(id, container_name, terms, invert) + : NoSearchLogs(id, container_name, tail, stream); + + return ( +
} + itemsCenterTitleRow + actions={ +
+
+
+
Invert
+
Search
+
+ +
+ {terms.map((term, index) => ( + + ))} +
+ setSearch(e.target.value)} + onBlur={addTerm} + onKeyDown={(e) => { + if (e.key === "Enter") addTerm(); + }} + className="w-[180px] xl:w-[240px]" + /> + +
+ + stdout + + stderr + {stderr && ( + + )} + + + + 0} + /> +
+ } + > + {Log} +
+ ); +}; + +const NoSearchLogs = ( + id: string, + container: string, + tail: string, + stream: string +) => { + const { data: log, refetch } = useRead( + "GetContainerLog", + { server: id, container, tail: Number(tail) }, + { refetchInterval: 30000 } + ); + return { + Log: ( +
+ +
+ ), + refetch, + stderr: !!log?.stderr, + }; +}; + +const SearchLogs = ( + id: string, + container: string, + terms: string[], + invert: boolean +) => { + const { data: log, refetch } = useRead("SearchContainerLog", { + server: id, + container, + terms, + combinator: Types.SearchCombinator.And, + invert, + }); + return { + Log: ( +
+ +
+ ), + refetch, + stderr: !!log?.stderr, + }; +}; diff --git a/frontend/src/pages/server-info/image.tsx b/frontend/src/pages/server-info/image.tsx new file mode 100644 index 000000000..ae2b208eb --- /dev/null +++ b/frontend/src/pages/server-info/image.tsx @@ -0,0 +1,217 @@ +import { Section } from "@components/layouts"; +import { ResourceLink } from "@components/resources/common"; +import { useServer } from "@components/resources/server"; +import { + ConfirmButton, + DOCKER_LINK_ICONS, + DockerContainersSection, + DockerLabelsSection, + DockerResourcePageName, +} from "@components/util"; +import { fmt_date_with_minutes, format_size_bytes } from "@lib/formatting"; +import { useExecute, useRead, useSetTitle } from "@lib/hooks"; +import { has_minimum_permissions } from "@lib/utils"; +import { Types } from "@komodo/client"; +import { Badge } from "@ui/badge"; +import { Button } from "@ui/button"; +import { DataTable } from "@ui/data-table"; +import { ChevronLeft, HistoryIcon, Info, Loader2, Trash } from "lucide-react"; +import { useNavigate, useParams } from "react-router-dom"; + +export const ImagePage = () => { + const { type, id, image } = useParams() as { + type: string; + id: string; + image: string; + }; + if (type !== "servers") { + return
This resource type does not have any images.
; + } + return ; +}; + +const ImagePageInner = ({ + id, + image: image_name, +}: { + id: string; + image: string; +}) => { + const server = useServer(id); + useSetTitle(`${server?.name} | image | ${image_name}`); + const nav = useNavigate(); + + const perms = useRead("GetPermissionLevel", { + target: { type: "Server", id }, + }).data; + + const { + data: image, + isPending, + isError, + } = useRead("InspectDockerImage", { + server: id, + image: image_name, + }); + + const containers = useRead( + "ListDockerContainers", + { + server: id, + }, + { refetchInterval: 5000 } + ).data?.filter((container) => + !image?.Id ? false : container.image_id === image?.Id + ); + + const history = useRead("ListDockerImageHistory", { + server: id, + image: image_name, + }).data; + + const { mutate: deleteImage, isPending: deletePending } = useExecute( + "DeleteImage", + { + onSuccess: () => nav("/servers/" + id), + } + ); + + if (isPending) { + return ( +
+ +
+ ); + } + + if (isError) { + return
Failed to inspect image.
; + } + + if (!image) { + return ( +
+ No image found with the given name: {image_name} +
+ ); + } + + const canExecute = has_minimum_permissions( + perms, + Types.PermissionLevel.Execute + ); + + const unused = containers && containers.length === 0 ? true : false; + + return ( +
+ {/* HEADER */} +
+ {/* BACK */} +
+ +
+ + {/* TITLE */} +
+
+ +
+ + {unused && Unused} +
+ + {/* INFO */} +
+ + {image.Id ? ( + <> + | +
+ Id: +
+ {image.Id} +
+
+ + ) : null} +
+
+ + {/* MAYBE DELETE */} + {canExecute && unused && ( + } + loading={deletePending} + onClick={() => deleteImage({ server: id, name: image_name })} + /> + )} + + {containers && containers.length > 0 && ( + + )} + + {/* TOP LEVEL IMAGE INFO */} +
}> + + row.original.Size + ? format_size_bytes(row.original.Size) + : "Unknown", + }, + ]} + /> +
+ + {history && history.length > 0 && ( +
}> + + fmt_date_with_minutes(new Date(row.original.Created * 1000)), + size: 200, + }, + ]} + /> +
+ )} + + +
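One detail in the image History table above: Docker reports Created in unix seconds, so the cell multiplies by 1000 before handing the value to fmt_date_with_minutes as a JS Date (which expects milliseconds). A trivial sketch with an illustrative timestamp:

```ts
// Docker image history "Created" is unix seconds; JS Date expects milliseconds.
const createdSeconds = 1_725_235_200; // illustrative value, not from this patch
const createdDate = new Date(createdSeconds * 1000);
console.log(createdDate.toISOString()); // "2024-09-02T00:00:00.000Z"
```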
+ ); +}; diff --git a/frontend/src/pages/server-info/network.tsx b/frontend/src/pages/server-info/network.tsx new file mode 100644 index 000000000..e57ef9437 --- /dev/null +++ b/frontend/src/pages/server-info/network.tsx @@ -0,0 +1,282 @@ +import { Section } from "@components/layouts"; +import { ResourceLink } from "@components/resources/common"; +import { useServer } from "@components/resources/server"; +import { + ConfirmButton, + DOCKER_LINK_ICONS, + DockerLabelsSection, + DockerOptions, + DockerResourceLink, + DockerResourcePageName, +} from "@components/util"; +import { useExecute, useRead, useSetTitle } from "@lib/hooks"; +import { has_minimum_permissions } from "@lib/utils"; +import { Types } from "@komodo/client"; +import { Badge } from "@ui/badge"; +import { Button } from "@ui/button"; +import { DataTable, SortableHeader } from "@ui/data-table"; +import { + Box, + ChevronLeft, + Info, + Loader2, + Trash, + Waypoints, +} from "lucide-react"; +import { useNavigate, useParams } from "react-router-dom"; + +export const NetworkPage = () => { + const { type, id, network } = useParams() as { + type: string; + id: string; + network: string; + }; + if (type !== "servers") { + return
This resource type does not have any networks.
; + } + return ; +}; + +const NetworkPageInner = ({ + id, + network: network_name, +}: { + id: string; + network: string; +}) => { + const server = useServer(id); + useSetTitle(`${server?.name} | network | ${network_name}`); + const nav = useNavigate(); + + const perms = useRead("GetPermissionLevel", { + target: { type: "Server", id }, + }).data; + + const { + data: network, + isPending, + isError, + } = useRead("InspectDockerNetwork", { + server: id, + network: network_name, + }); + + const { mutate: deleteNetwork, isPending: deletePending } = useExecute( + "DeleteNetwork", + { + onSuccess: () => nav("/servers/" + id), + } + ); + + if (isPending) { + return ( +
+ +
+ ); + } + + if (isError) { + return
Failed to inspect network.
; + } + + if (!network) { + return ( +
+ No network found with the given name: {network_name} +
+ ); + } + + const canExecute = has_minimum_permissions( + perms, + Types.PermissionLevel.Execute + ); + + const containers = Object.values(network.Containers ?? {}); + + const ipam_driver = network.IPAM?.Driver; + const ipam_config = + network.IPAM?.Config.map((config) => ({ + ...config, + Driver: ipam_driver, + })) ?? []; + + const unused = + !["none", "host", "bridge"].includes(network_name) && + containers && + containers.length === 0 + ? true + : false; + + return ( +
+ {/* HEADER */} +
+ {/* BACK */} +
+ +
+ + {/* TITLE */} +
+
+ +
+ + {unused && Unused} +
+ + {/* INFO */} +
+ | +
+ IPv6: + {network.EnableIPv6 ? "Enabled" : "Disabled"} +
+ {network.Id ? ( + <> + | +
+ Id: +
+ {network.Id} +
+
+ + ) : null} +
+
+ + {/* MAYBE DELETE */} + {canExecute && unused && ( + } + loading={deletePending} + onClick={() => deleteNetwork({ server: id, name: network_name })} + /> + )} + + {containers.length > 0 && ( +
}> + ( + + ), + cell: ({ row }) => + row.original.Name ? ( + + ) : ( + "Unknown" + ), + size: 200, + }, + { + accessorKey: "IPv4Address", + header: ({ column }) => ( + + ), + cell: ({ row }) => row.original.IPv4Address || "None", + }, + { + accessorKey: "IPv6Address", + header: ({ column }) => ( + + ), + cell: ({ row }) => row.original.IPv6Address || "None", + }, + { + accessorKey: "MacAddress", + header: ({ column }) => ( + + ), + cell: ({ row }) => row.original.MacAddress || "None", + }, + ]} + /> +
+ )} + + {/* TOP LEVEL NETWORK INFO */} +
}> + + +
+ + {ipam_config.length > 0 && ( +
}> + + +
+ )} + + +
+ ); +}; diff --git a/frontend/src/pages/server-info/volume.tsx b/frontend/src/pages/server-info/volume.tsx new file mode 100644 index 000000000..99fe3acf0 --- /dev/null +++ b/frontend/src/pages/server-info/volume.tsx @@ -0,0 +1,180 @@ +import { Section } from "@components/layouts"; +import { ResourceLink } from "@components/resources/common"; +import { useServer } from "@components/resources/server"; +import { + ConfirmButton, + DOCKER_LINK_ICONS, + DockerContainersSection, + DockerLabelsSection, + DockerOptions, + DockerResourcePageName, +} from "@components/util"; +import { useExecute, useRead, useSetTitle } from "@lib/hooks"; +import { has_minimum_permissions } from "@lib/utils"; +import { Types } from "@komodo/client"; +import { Badge } from "@ui/badge"; +import { Button } from "@ui/button"; +import { DataTable } from "@ui/data-table"; +import { ChevronLeft, Info, Loader2, Trash } from "lucide-react"; +import { useNavigate, useParams } from "react-router-dom"; + +export const VolumePage = () => { + const { type, id, volume } = useParams() as { + type: string; + id: string; + volume: string; + }; + if (type !== "servers") { + return
This resource type does not have any volumes.
; + } + return ; +}; + +const VolumePageInner = ({ + id, + volume: volume_name, +}: { + id: string; + volume: string; +}) => { + const server = useServer(id); + useSetTitle(`${server?.name} | volume | ${volume_name}`); + const nav = useNavigate(); + + const perms = useRead("GetPermissionLevel", { + target: { type: "Server", id }, + }).data; + + const { + data: volume, + isPending, + isError, + } = useRead("InspectDockerVolume", { + server: id, + volume: volume_name, + }); + + const containers = useRead( + "ListDockerContainers", + { + server: id, + }, + { refetchInterval: 5000 } + ).data?.filter((container) => container.volumes?.includes(volume_name)); + + const { mutate: deleteVolume, isPending: deletePending } = useExecute( + "DeleteVolume", + { + onSuccess: () => nav("/servers/" + id), + } + ); + + if (isPending) { + return ( +
+ +
+ ); + } + + if (isError) { + return
Failed to inspect volume.
; + } + + if (!volume) { + return ( +
+ No volume found with the given name: {volume_name} +
+ ); + } + + const canExecute = has_minimum_permissions( + perms, + Types.PermissionLevel.Execute + ); + + const unused = containers && containers.length === 0 ? true : false; + + return ( +
+ {/* HEADER */} +
+ {/* BACK */} +
+ +
+ + {/* TITLE */} +
+
+ +
+ + {containers && containers.length === 0 && ( + Unused + )} +
+ + {/* INFO */} +
+ +
+
+ + {/* MAYBE DELETE */} + {canExecute && unused && ( + } + loading={deletePending} + onClick={() => deleteVolume({ server: id, name: volume_name })} + /> + )} + + {containers && containers.length > 0 && ( + + )} + + {/* TOP LEVEL VOLUME INFO */} +
}> + + +
+ + +
+ ); +}; diff --git a/frontend/src/pages/settings/providers.tsx b/frontend/src/pages/settings/providers.tsx index 9cf2b9a0c..7afa0b824 100644 --- a/frontend/src/pages/settings/providers.tsx +++ b/frontend/src/pages/settings/providers.tsx @@ -6,7 +6,7 @@ import { useUser, useWrite, } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { Card } from "@ui/card"; import { DataTable, SortableHeader } from "@ui/data-table"; @@ -125,7 +125,7 @@ const Providers = ({ type }: { type: "GitProvider" | "DockerRegistry" }) => { setUpdateMenuData({ title: "Set Domain", value: row.original.domain ?? "", - placeholder: "Input domain, eg. git.monitor.dev", + placeholder: "Input domain, eg. git.komo.do", titleRight: type === "GitProvider" ? ( diff --git a/frontend/src/pages/settings/tags.tsx b/frontend/src/pages/settings/tags.tsx index 60af69a21..7a7cf5345 100644 --- a/frontend/src/pages/settings/tags.tsx +++ b/frontend/src/pages/settings/tags.tsx @@ -99,15 +99,6 @@ export const CreateTag = () => { toast({ title: "Tag Created" }); setOpen(false); }, - onError: (e) => { - console.log("create tag error:" + e); - toast({ - title: "Failed to create tag", - description: "See console for details", - variant: "destructive", - }); - setOpen(false); - }, }); const submit = () => mutate({ name }); return ( @@ -154,14 +145,6 @@ const DeleteTag = ({ tag_id }: { tag_id: string }) => { invalidate(["ListTags"]); toast({ title: "Tag Deleted" }); }, - onError: (e) => { - console.log("delete tag error:" + e); - toast({ - title: "Failed to delete tag", - description: "See console for details", - variant: "destructive", - }); - }, }); return ( { toast({ title: "Updated variable description" }); }, }); + const { mutate: updateIsSecret } = useWrite("UpdateVariableIsSecret", { + onSuccess: () => { + inv(["ListVariables"], ["GetVariable"]); + toast({ title: "Updated variable 'is secret'" }); + }, + }); return (
@@ -93,90 +100,105 @@ export const Variables = () => { )} {/** VARIABLES */} - ( - - ), - }, - { - accessorKey: "value", - size: 300, - header: ({ column }) => ( - - ), - cell: ({ row }) => { - return ( -
+
+ ( + + ), + }, + { + accessorKey: "value", + size: 300, + header: ({ column }) => ( + + ), + cell: ({ row }) => { + return ( +
+ { + setUpdateMenuData({ + title: `${row.original.name} - Value`, + value: row.original.value ?? "", + placeholder: "Set value", + onUpdate: (value) => { + if (row.original.value === value) { + return; + } + updateValue({ name: row.original.name, value }); + }, + }); + }} + > +
+ {row.original.value || "Set value"} +
+
+ +
+ ); + }, + }, + { + accessorKey: "description", + size: 200, + header: "Description", + cell: ({ row }) => { + return ( { setUpdateMenuData({ - title: `${row.original.name} - Value`, - value: row.original.value ?? "", - placeholder: "Set value", - onUpdate: (value) => { - if (row.original.value === value) { + title: `${row.original.name} - Description`, + value: row.original.description ?? "", + placeholder: "Set description", + onUpdate: (description) => { + if (row.original.description === description) { return; } - updateValue({ name: row.original.name, value }); + updateDescription({ + name: row.original.name, + description, + }); }, }); }} > -
- {row.original.value || "Set value"} +
+ {row.original.description || "Set description"}
- -
- ); + ); + }, }, - }, - { - accessorKey: "description", - size: 200, - header: "Description", - cell: ({ row }) => { - return ( - { - setUpdateMenuData({ - title: `${row.original.name} - Description`, - value: row.original.description ?? "", - placeholder: "Set description", - onUpdate: (description) => { - if (row.original.description === description) { - return; - } - updateDescription({ - name: row.original.name, - description, - }); - }, - }); - }} - > -
- {row.original.description || "Set description"} -
-
- ); + { + header: "Secret", + size: 100, + cell: ({ row }) => ( + + updateIsSecret({ name: row.original.name, is_secret }) + } + disabled={disabled} + /> + ), }, - }, - { - header: "Delete", - maxSize: 200, - cell: ({ row }) => , - }, - ]} - /> + { + header: "Delete", + cell: ({ row }) => , + minSize: 200, + }, + ]} + /> +
{/** SECRETS */} {secrets.length ? ( diff --git a/frontend/src/pages/stack-service/index.tsx b/frontend/src/pages/stack-service/index.tsx index 8d604264d..9cbef6962 100644 --- a/frontend/src/pages/stack-service/index.tsx +++ b/frontend/src/pages/stack-service/index.tsx @@ -10,20 +10,20 @@ import { StartStopStack, } from "@components/resources/stack/actions"; import { - deployment_state_intention, + container_state_intention, stroke_color_class_by_intention, } from "@lib/color"; import { useRead, useSetTitle } from "@lib/hooks"; import { cn, has_minimum_permissions } from "@lib/utils"; -import { Types } from "@monitor/client"; -import { Box, ChevronLeft, Clapperboard, Layers2 } from "lucide-react"; +import { Types } from "@komodo/client"; +import { ChevronLeft, Clapperboard, Layers2 } from "lucide-react"; import { useNavigate, useParams } from "react-router-dom"; import { StackServiceLogs } from "./log"; import { ResourceUpdates } from "@components/updates/resource"; import { Button } from "@ui/button"; import { ExportButton } from "@components/export"; import { AddTags, ResourceTags } from "@components/tags"; -import { StatusBadge } from "@components/util"; +import { DockerResourceLink, StatusBadge } from "@components/util"; type IdServiceComponent = React.FC<{ id: string; service?: string }>; @@ -65,8 +65,8 @@ const StackServicePageInner = ({ const canWrite = has_minimum_permissions(perms, Types.PermissionLevel.Write); const services = useRead("ListStackServices", { stack: stack_id }).data; const container = services?.find((s) => s.service === service)?.container; - const state = container?.state ?? Types.DeploymentState.Unknown; - const intention = deployment_state_intention(state); + const state = container?.state ?? Types.ContainerStateStatusEnum.Empty; + const intention = container_state_intention(state); const stroke_color = stroke_color_class_by_intention(intention); return ( @@ -115,13 +115,15 @@ const StackServicePageInner = ({ )} - {container && container?.name !== service && ( + {stack?.info.server_id && container && container.name && ( <> | -
- - {container.name} -
+ )}
diff --git a/frontend/src/pages/stack-service/log.tsx b/frontend/src/pages/stack-service/log.tsx index c0a9e7f90..9173ad14d 100644 --- a/frontend/src/pages/stack-service/log.tsx +++ b/frontend/src/pages/stack-service/log.tsx @@ -1,6 +1,6 @@ import { Section } from "@components/layouts"; import { useRead } from "@lib/hooks"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { RefreshCw, X, AlertOctagon, ScrollText } from "lucide-react"; import { useState } from "react"; @@ -21,13 +21,9 @@ export const StackServiceLogs = ({ // const stack = useStack(id); const services = useRead("ListStackServices", { stack: id }).data; const container = services?.find((s) => s.service === service)?.container; - const state = container?.state ?? Types.DeploymentState.Unknown; + const state = container?.state ?? Types.ContainerStateStatusEnum.Empty; - if ( - state === undefined || - state === Types.DeploymentState.Unknown || - state === Types.DeploymentState.NotDeployed - ) { + if (state === undefined || state === Types.ContainerStateStatusEnum.Empty) { return null; } @@ -73,7 +69,7 @@ const StackLogsInner = ({
} - itemsCenterTitleRow + itemsCenterTitleRow actions={
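Aside: the stack-service pages in the hunks above now key their status color off the Docker container state (`Types.ContainerStateStatusEnum`, via `container_state_intention`) rather than the old `Types.DeploymentState` / `deployment_state_intention`. The real helper lives in `@lib/color` and is not shown in this patch; the following is only a minimal sketch of what such a mapping could look like, where every enum member other than `Empty` and the intention names are assumptions for illustration.

```ts
// Hypothetical sketch — not the @lib/color implementation from this patch.
// Only `Empty` is confirmed by the diff; the other members and the
// intention names are assumptions.
type ColorIntention = "Good" | "Warning" | "Critical" | "Neutral";

enum ContainerStateStatusEnum {
  Empty = "empty",
  Running = "running",
  Paused = "paused",
  Restarting = "restarting",
  Exited = "exited",
  Dead = "dead",
}

export const container_state_intention = (
  state: ContainerStateStatusEnum
): ColorIntention => {
  switch (state) {
    case ContainerStateStatusEnum.Running:
      return "Good";
    case ContainerStateStatusEnum.Paused:
    case ContainerStateStatusEnum.Restarting:
      return "Warning";
    case ContainerStateStatusEnum.Exited:
    case ContainerStateStatusEnum.Dead:
      return "Critical";
    default:
      // `Empty` = no container found for this stack service.
      return "Neutral";
  }
};
```

Mapping `Empty` to a neutral intention matches the log component above, which simply renders nothing when no container exists for the service.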
diff --git a/frontend/src/pages/updates.tsx b/frontend/src/pages/updates.tsx index fc5847f73..c58514bd5 100644 --- a/frontend/src/pages/updates.tsx +++ b/frontend/src/pages/updates.tsx @@ -3,7 +3,7 @@ import { ResourceComponents } from "@components/resources"; import { UpdatesTable } from "@components/updates/table"; import { useRead, useResourceParamType, useSetTitle } from "@lib/hooks"; import { filterBySplit } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { CaretSortIcon } from "@radix-ui/react-icons"; import { UsableResource } from "@types"; import { Button } from "@ui/button"; @@ -141,9 +141,24 @@ const OPERATIONS_BY_RESOURCE: { [key: string]: Types.Operation[] } = { Types.Operation.UpdateDeployment, Types.Operation.RenameDeployment, Types.Operation.Deploy, - Types.Operation.StopContainer, - Types.Operation.StartContainer, - Types.Operation.RemoveContainer, + Types.Operation.StartDeployment, + Types.Operation.RestartDeployment, + Types.Operation.PauseDeployment, + Types.Operation.UnpauseDeployment, + Types.Operation.StopDeployment, + Types.Operation.DestroyDeployment, + ], + Stack: [ + Types.Operation.CreateStack, + Types.Operation.UpdateStack, + Types.Operation.RenameStack, + Types.Operation.DeployStack, + Types.Operation.StartStack, + Types.Operation.RestartStack, + Types.Operation.PauseStack, + Types.Operation.UnpauseStack, + Types.Operation.StopStack, + Types.Operation.DestroyStack, ], Build: [ Types.Operation.CreateBuild, @@ -156,6 +171,7 @@ const OPERATIONS_BY_RESOURCE: { [key: string]: Types.Operation[] } = { Types.Operation.UpdateRepo, Types.Operation.CloneRepo, Types.Operation.PullRepo, + Types.Operation.BuildRepo, ], Procedure: [ Types.Operation.CreateProcedure, diff --git a/frontend/src/pages/user-group.tsx b/frontend/src/pages/user-group.tsx index b50b56a89..667eae185 100644 --- a/frontend/src/pages/user-group.tsx +++ b/frontend/src/pages/user-group.tsx @@ -6,7 +6,7 @@ import { UserTable } from "@components/users/table"; import { ActionWithDialog } from "@components/util"; import { useInvalidate, useRead, useWrite } from "@lib/hooks"; import { filterBySplit } from "@lib/utils"; -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; import { Button } from "@ui/button"; import { Command, diff --git a/frontend/src/router.tsx b/frontend/src/router.tsx index 47aeb0d51..f7586814f 100644 --- a/frontend/src/router.tsx +++ b/frontend/src/router.tsx @@ -15,6 +15,10 @@ import { UserPage } from "@pages/user"; import { UserGroupPage } from "@pages/user-group"; import { Settings } from "@pages/settings"; import { StackServicePage } from "@pages/stack-service"; +import { NetworkPage } from "@pages/server-info/network"; +import { ImagePage } from "@pages/server-info/image"; +import { VolumePage } from "@pages/server-info/volume"; +import { ContainerPage } from "@pages/server-info/container"; const ROUTER = createBrowserRouter([ { @@ -50,6 +54,22 @@ const ROUTER = createBrowserRouter([ path: ":id/service/:service", element: , }, + { + path: ":id/container/:container", + element: , + }, + { + path: ":id/network/:network", + element: , + }, + { + path: ":id/image/:image", + element: , + }, + { + path: ":id/volume/:volume", + element: , + }, ], }, ], diff --git a/frontend/src/types.d.ts b/frontend/src/types.d.ts index 6f71d2511..5bf9ba416 100644 --- a/frontend/src/types.d.ts +++ b/frontend/src/types.d.ts @@ -1,4 +1,4 @@ -import { Types } from "@monitor/client"; +import { Types } from "@komodo/client"; 
export type UsableResource = Exclude; diff --git a/frontend/yarn.lock b/frontend/yarn.lock index 817c8b840..df80ab5ef 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -2253,10 +2253,10 @@ lru-cache@^6.0.0: dependencies: yallist "^4.0.0" -lucide-react@0.427.0: - version "0.427.0" - resolved "https://registry.yarnpkg.com/lucide-react/-/lucide-react-0.427.0.tgz#e06974514bbd591049f9d736b3d3ae99d4ede8c9" - integrity sha512-lv9s6c5BDF/ccuA0EgTdskTxIe11qpwBDmzRZHJAKtp8LTewAvDvOM+pTES9IpbBuTqkjiMhOmGpJ/CB+mKjFw== +lucide-react@0.437.0: + version "0.437.0" + resolved "https://registry.yarnpkg.com/lucide-react/-/lucide-react-0.437.0.tgz#d0bef1567b867693d21b179763bfa1e0199b8fef" + integrity sha512-RXQq6tnm1FlXDUtOwLaoXET2TOEGpQULrQlPOjGHgIVsPhicHNat9sWF33OAe2UCLMFiWF1oL+FtAg43BqVY4Q== merge2@^1.3.0, merge2@^1.4.1: version "1.4.1" diff --git a/monitor.code-workspace b/komodo.code-workspace similarity index 100% rename from monitor.code-workspace rename to komodo.code-workspace diff --git a/lib/command/Cargo.toml b/lib/command/Cargo.toml index 232420aeb..2f28fd1d3 100644 --- a/lib/command/Cargo.toml +++ b/lib/command/Cargo.toml @@ -8,5 +8,5 @@ repository.workspace = true homepage.workspace = true [dependencies] -monitor_client.workspace = true +komodo_client.workspace = true run_command.workspace = true \ No newline at end of file diff --git a/lib/command/src/lib.rs b/lib/command/src/lib.rs index 0816ab73c..f20a4e5cc 100644 --- a/lib/command/src/lib.rs +++ b/lib/command/src/lib.rs @@ -1,11 +1,8 @@ -use monitor_client::entities::{monitor_timestamp, update::Log}; +use komodo_client::entities::{komodo_timestamp, update::Log}; use run_command::{async_run_command, CommandOutput}; -pub async fn run_monitor_command( - stage: &str, - command: String, -) -> Log { - let start_ts = monitor_timestamp(); +pub async fn run_komodo_command(stage: &str, command: String) -> Log { + let start_ts = komodo_timestamp(); let output = async_run_command(&command).await; output_into_log(stage, command, start_ts, output) } @@ -24,6 +21,6 @@ pub fn output_into_log( command, success, start_ts, - end_ts: monitor_timestamp(), + end_ts: komodo_timestamp(), } } diff --git a/lib/git/Cargo.toml b/lib/git/Cargo.toml index 502f2f92a..2e1f97ced 100644 --- a/lib/git/Cargo.toml +++ b/lib/git/Cargo.toml @@ -8,7 +8,7 @@ repository.workspace = true homepage.workspace = true [dependencies] -monitor_client.workspace = true +komodo_client.workspace = true formatting.workspace = true command.workspace = true # diff --git a/lib/git/src/lib.rs b/lib/git/src/lib.rs index a05465d7e..675cb508c 100644 --- a/lib/git/src/lib.rs +++ b/lib/git/src/lib.rs @@ -5,11 +5,11 @@ use std::{ }; use anyhow::Context; -use command::run_monitor_command; +use command::run_komodo_command; use formatting::{bold, format_serror, muted}; -use monitor_client::entities::{ - all_logs_success, environment_vars_to_string, monitor_timestamp, - to_monitor_name, update::Log, CloneArgs, EnvironmentVar, +use komodo_client::entities::{ + all_logs_success, environment_vars_to_string, komodo_timestamp, + to_komodo_name, update::Log, CloneArgs, EnvironmentVar, LatestCommit, SystemCommand, }; use run_command::async_run_command; @@ -17,7 +17,11 @@ use tokio::fs; use tracing::instrument; /// Return (logs, commit hash, commit msg) -#[tracing::instrument(level = "debug")] +#[tracing::instrument( + level = "debug", + skip(environment, secrets, on_pull, core_replacers) +)] +#[allow(clippy::too_many_arguments)] pub async fn pull( path: &Path, branch: &Option, @@ -27,6 +31,7 @@ pub async 
fn pull( env_file_path: &str, // if skip_secret_interp is none, make sure to pass None here secrets: Option<&HashMap>, + core_replacers: &[(String, String)], ) -> (Vec, Option, Option, Option) { let branch = match branch { Some(branch) => branch.to_owned(), @@ -36,7 +41,7 @@ pub async fn pull( let command = format!("cd {} && git pull -f origin {branch}", path.display()); - let pull_log = run_monitor_command("git pull", command).await; + let pull_log = run_komodo_command("git pull", command).await; let mut logs = vec![pull_log]; @@ -45,7 +50,7 @@ pub async fn pull( } if let Some(commit) = commit { - let reset_log = run_monitor_command( + let reset_log = run_komodo_command( "set commit", format!("cd {} && git reset --hard {commit}", path.display()), ) @@ -81,15 +86,67 @@ pub async fn pull( return (logs, hash, message, None); }; - if let Some(on_pull) = on_pull { - if !on_pull.path.is_empty() && !on_pull.command.is_empty() { - let path = path.join(&on_pull.path); - let on_pull_log = run_monitor_command( - "on pull", - format!("cd {} && {}", path.display(), on_pull.command), - ) - .await; - logs.push(on_pull_log); + if let Some(command) = on_pull { + if !command.path.is_empty() && !command.command.is_empty() { + let on_pull_path = path.join(&command.path); + if let Some(secrets) = secrets { + let (full_command, mut replacers) = + match svi::interpolate_variables( + &command.command, + secrets, + svi::Interpolator::DoubleBrackets, + true, + ) + .context( + "failed to interpolate secrets into on_pull command", + ) { + Ok(res) => res, + Err(e) => { + logs.push(Log::error( + "interpolate secrets - on_pull", + format_serror(&e.into()), + )); + return (logs, hash, message, None); + } + }; + replacers.extend(core_replacers.to_owned()); + let mut on_pull_log = run_komodo_command( + "on pull", + format!("cd {} && {full_command}", on_pull_path.display()), + ) + .await; + + on_pull_log.command = + svi::replace_in_string(&on_pull_log.command, &replacers); + on_pull_log.stdout = + svi::replace_in_string(&on_pull_log.stdout, &replacers); + on_pull_log.stderr = + svi::replace_in_string(&on_pull_log.stderr, &replacers); + + tracing::debug!( + "run repo on_pull command | command: {} | cwd: {:?}", + on_pull_log.command, + on_pull_path + ); + + logs.push(on_pull_log); + } else { + let on_pull_log = run_komodo_command( + "on pull", + format!( + "cd {} && {}", + on_pull_path.display(), + command.command + ), + ) + .await; + tracing::debug!( + "run repo on_pull command | command: {} | cwd: {:?}", + command.command, + on_pull_path + ); + logs.push(on_pull_log); + } } } @@ -101,7 +158,16 @@ pub type CloneRes = (Vec, Option, Option, Option); /// returns (logs, commit hash, commit message, env_file_path) -#[tracing::instrument(level = "debug", skip(access_token))] +#[tracing::instrument( + level = "debug", + skip( + clone_args, + access_token, + environment, + secrets, + core_replacers + ) +)] pub async fn clone( clone_args: T, repo_dir: &Path, @@ -110,6 +176,7 @@ pub async fn clone( env_file_path: &str, // if skip_secret_interp is none, make sure to pass None here secrets: Option<&HashMap>, + core_replacers: &[(String, String)], ) -> anyhow::Result where T: Into + std::fmt::Debug, @@ -132,7 +199,7 @@ where .context("resource has no provider attached")?; let repo = repo.as_ref().context("resource has no repo attached")?; - let name = to_monitor_name(&name); + let name = to_komodo_name(&name); let repo_dir = match destination { Some(destination) => PathBuf::from_str(&destination) @@ -189,41 +256,109 @@ where if let 
Some(command) = on_clone { if !command.path.is_empty() && !command.command.is_empty() { let on_clone_path = repo_dir.join(&command.path); - let on_clone_log = run_monitor_command( - "on clone", - format!( - "cd {} && {}", - on_clone_path.display(), - command.command - ), - ) - .await; - tracing::debug!( - "run repo on_clone command | command: {} | cwd: {:?}", - command.command, - on_clone_path - ); - logs.push(on_clone_log); + if let Some(secrets) = secrets { + let (full_command, mut replacers) = + svi::interpolate_variables( + &command.command, + secrets, + svi::Interpolator::DoubleBrackets, + true, + ) + .context( + "failed to interpolate secrets into on_clone command", + )?; + replacers.extend(core_replacers.to_owned()); + let mut on_clone_log = run_komodo_command( + "on clone", + format!("cd {} && {full_command}", on_clone_path.display()), + ) + .await; + + on_clone_log.command = + svi::replace_in_string(&on_clone_log.command, &replacers); + on_clone_log.stdout = + svi::replace_in_string(&on_clone_log.stdout, &replacers); + on_clone_log.stderr = + svi::replace_in_string(&on_clone_log.stderr, &replacers); + + tracing::debug!( + "run repo on_clone command | command: {} | cwd: {:?}", + on_clone_log.command, + on_clone_path + ); + + logs.push(on_clone_log); + } else { + let on_clone_log = run_komodo_command( + "on clone", + format!( + "cd {} && {}", + on_clone_path.display(), + command.command + ), + ) + .await; + tracing::debug!( + "run repo on_clone command | command: {} | cwd: {:?}", + command.command, + on_clone_path + ); + logs.push(on_clone_log); + } } } if let Some(command) = on_pull { if !command.path.is_empty() && !command.command.is_empty() { let on_pull_path = repo_dir.join(&command.path); - let on_pull_log = run_monitor_command( - "on pull", - format!( - "cd {} && {}", - on_pull_path.display(), - command.command - ), - ) - .await; - tracing::debug!( - "run repo on_pull command | command: {} | cwd: {:?}", - command.command, - on_pull_path - ); - logs.push(on_pull_log); + if let Some(secrets) = secrets { + let (full_command, mut replacers) = + svi::interpolate_variables( + &command.command, + secrets, + svi::Interpolator::DoubleBrackets, + true, + ) + .context( + "failed to interpolate secrets into on_pull command", + )?; + replacers.extend(core_replacers.to_owned()); + let mut on_pull_log = run_komodo_command( + "on pull", + format!("cd {} && {full_command}", on_pull_path.display()), + ) + .await; + + on_pull_log.command = + svi::replace_in_string(&on_pull_log.command, &replacers); + on_pull_log.stdout = + svi::replace_in_string(&on_pull_log.stdout, &replacers); + on_pull_log.stderr = + svi::replace_in_string(&on_pull_log.stderr, &replacers); + + tracing::debug!( + "run repo on_pull command | command: {} | cwd: {:?}", + on_pull_log.command, + on_pull_path + ); + + logs.push(on_pull_log); + } else { + let on_pull_log = run_komodo_command( + "on pull", + format!( + "cd {} && {}", + on_pull_path.display(), + command.command + ), + ) + .await; + tracing::debug!( + "run repo on_pull command | command: {} | cwd: {:?}", + command.command, + on_pull_path + ); + logs.push(on_pull_log); + } } } @@ -257,7 +392,7 @@ async fn clone_inner( format!("{protocol}://{access_token_at}{provider}/{repo}.git"); let command = format!("git clone {repo_url} {}{branch}", destination.display()); - let start_ts = monitor_timestamp(); + let start_ts = komodo_timestamp(); let output = async_run_command(&command).await; let success = output.success(); let (command, stderr) = if !access_token_at.is_empty() { @@ 
-277,7 +412,7 @@ async fn clone_inner( stdout: output.stdout, stderr, start_ts, - end_ts: monitor_timestamp(), + end_ts: komodo_timestamp(), }]; if !logs[0].success { @@ -285,7 +420,7 @@ async fn clone_inner( } if let Some(commit) = commit { - let reset_log = run_monitor_command( + let reset_log = run_komodo_command( "set commit", format!( "cd {} && git reset --hard {commit}", @@ -325,7 +460,7 @@ pub async fn get_commit_hash_info( pub async fn get_commit_hash_log( repo_dir: &Path, ) -> anyhow::Result<(Log, String, String)> { - let start_ts = monitor_timestamp(); + let start_ts = komodo_timestamp(); let command = format!("cd {} && git rev-parse --short HEAD && git rev-parse HEAD && git log -1 --pretty=%B", repo_dir.display()); let output = async_run_command(&command).await; let mut split = output.stdout.split('\n'); @@ -353,7 +488,7 @@ pub async fn get_commit_hash_log( stderr: String::new(), success: true, start_ts, - end_ts: monitor_timestamp(), + end_ts: komodo_timestamp(), }; Ok((log, short_hash, msg)) } diff --git a/lib/logger/Cargo.toml b/lib/logger/Cargo.toml index c76a6fb9e..db8167f83 100644 --- a/lib/logger/Cargo.toml +++ b/lib/logger/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true [dependencies] # local client -monitor_client.workspace = true +komodo_client.workspace = true # external anyhow.workspace = true tracing.workspace = true diff --git a/lib/logger/src/lib.rs b/lib/logger/src/lib.rs index 892c0c1b7..c6823352f 100644 --- a/lib/logger/src/lib.rs +++ b/lib/logger/src/lib.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use monitor_client::entities::logger::{LogConfig, StdioLogMode}; +use komodo_client::entities::logger::{LogConfig, StdioLogMode}; use tracing::level_filters::LevelFilter; use tracing_opentelemetry::OpenTelemetryLayer; use tracing_subscriber::{ diff --git a/readme.md b/readme.md index 0374e508b..82d4056ed 100644 --- a/readme.md +++ b/readme.md @@ -1,13 +1,17 @@ -# Monitor 🦎 +# Komodo 🦎 A tool to build and deploy software across many servers. -🦎 [See the docs](https://docs.monitor.dev) +🦎 [See the docs](https://komo.do) -🦎 [Try the Demo](https://demo.monitor.dev) +🦎 [Try the Demo](https://demo.komo.do) 🦎 [Join the Discord](https://discord.gg/DRqE8Fvg5c) +## About + +The Komodo dragon is the largest living member of the [*Monitor* family of lizards](https://en.wikipedia.org/wiki/Monitor_lizard). + ## Disclaimer Warning. This is open source software (GPL-V3), and while we make a best effort to ensure releases are stable and bug-free, @@ -15,35 +19,35 @@ there are no warranties. Use at your own risk. 
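Stepping back to the `lib/git` changes a few hunks up: `on_clone` and `on_pull` commands are now passed through secret interpolation (the `svi` crate with its DoubleBrackets interpolator) before running, and the returned replacers are used to scrub the secret values back out of the stored command, stdout, and stderr. A rough TypeScript sketch of that interpolate-then-redact pattern follows; the `[[NAME]]` placeholder syntax and the helper names are assumptions for illustration, not the actual `svi` API.

```ts
// Hypothetical sketch of the interpolate-then-redact flow used for
// on_clone / on_pull commands. Placeholder syntax and names are assumed.
type Replacer = [secretValue: string, placeholder: string];

// Substitute [[NAME]] placeholders with secret values, remembering how to
// scrub those values back out of any captured output.
function interpolateSecrets(
  command: string,
  secrets: Record<string, string>
): { fullCommand: string; replacers: Replacer[] } {
  const replacers: Replacer[] = [];
  const fullCommand = command.replace(/\[\[(\w+)\]\]/g, (match, name: string) => {
    const value = secrets[name];
    if (value === undefined) return match; // leave unknown placeholders alone
    replacers.push([value, `<${name}>`]);
    return value;
  });
  return { fullCommand, replacers };
}

// Scrub secret values from command / stdout / stderr before storing logs.
function redact(text: string, replacers: Replacer[]): string {
  return replacers.reduce(
    (acc, [value, placeholder]) => acc.split(value).join(placeholder),
    text
  );
}

// Usage: interpolate before execution, redact everything that gets logged.
const { fullCommand, replacers } = interpolateSecrets(
  "curl -H 'Authorization: Bearer [[API_TOKEN]]' https://example.com",
  { API_TOKEN: "super-secret-token" }
);
console.log(redact(fullCommand, replacers)); // token appears as <API_TOKEN>
```

The point of keeping the replacers around is that everything pushed into the update log has already been redacted, so secrets interpolated into a command never show up in stored output.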
## Links -- [periphery setup](https://github.com/mbecker20/monitor/blob/main/scripts/readme.md) -- [roadmap](https://github.com/mbecker20/monitor/blob/main/roadmap.md) -- [changelog](https://github.com/mbecker20/monitor/blob/main/changelog.md) -- [migrator](https://github.com/mbecker20/monitor/blob/main/bin/migrator/README.md) +- [periphery setup](https://github.com/mbecker20/komodo/blob/main/scripts/readme.md) +- [roadmap](https://github.com/mbecker20/komodo/blob/main/roadmap.md) +- [changelog](https://github.com/mbecker20/komodo/blob/main/changelog.md) +- [migrator](https://github.com/mbecker20/komodo/blob/main/bin/migrator/README.md) ## Screenshots ### Light Theme -![Dashboard](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Light-Dashboard.png) -![Resources](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Light-Resources.png) -![Deployment](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Light-Deployment.png) -![Server](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Light-Server.png) -![Sync](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Light-Sync.png) -![Procedure](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Light-Procedure.png) -![UserGroup](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Light-UserGroup.png) -![Update](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Light-Update.png) -![Search](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Light-Search.png) -![Export](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Light-Export.png) +![Dashboard](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Light-Dashboard.png) +![Resources](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Light-Resources.png) +![Deployment](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Light-Deployment.png) +![Server](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Light-Server.png) +![Sync](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Light-Sync.png) +![Procedure](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Light-Procedure.png) +![UserGroup](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Light-UserGroup.png) +![Update](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Light-Update.png) +![Search](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Light-Search.png) +![Export](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Light-Export.png) ### Dark Theme -![Dashboard](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Dark-Dashboard.png) -![Resources](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Dark-Resources.png) -![Deployment](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Dark-Deployment.png) -![Server](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Dark-Server.png) -![Sync](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Dark-Sync.png) -![Procedure](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Dark-Procedure.png) -![UserGroup](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Dark-UserGroup.png) -![Update](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Dark-Update.png) 
-![Search](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Dark-Search.png) -![Export](https://raw.githubusercontent.com/mbecker20/monitor/main/screenshots/Dark-Export.png) +![Dashboard](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Dark-Dashboard.png) +![Resources](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Dark-Resources.png) +![Deployment](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Dark-Deployment.png) +![Server](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Dark-Server.png) +![Sync](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Dark-Sync.png) +![Procedure](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Dark-Procedure.png) +![UserGroup](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Dark-UserGroup.png) +![Update](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Dark-Update.png) +![Search](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Dark-Search.png) +![Export](https://raw.githubusercontent.com/mbecker20/komodo/main/screenshots/Dark-Export.png) diff --git a/roadmap.md b/roadmap.md index 09141c22d..06afdeb4a 100644 --- a/roadmap.md +++ b/roadmap.md @@ -2,13 +2,13 @@ In order to clarify the goals and invite community participation in the direction of the project, this document will serve as a roadmap for upcoming features / releases. -If you have an idea for Monitor, feel free to open an issue beginning with the `[Request]` tag. The community is also encouraged to open PRs fulfilling the goals of any planned release. +If you have an idea for Komodo, feel free to open an issue beginning with the `[Request]` tag. The community is also encouraged to open PRs fulfilling the goals of any planned release. ## Release plans - **v1.12**: Support any git provider / docker registry (supports self-hosted providers like Gitea) ✅ - **v1.13**: Support "Compose" resource - Paste in a docker compose file and manage it like a Portainer "Stack" ✅ -- **v1.14**: Manage docker networks, images, volumes in the UI +- **v1.14**: Manage docker networks, images, volumes in the UI ✅ - **v1.15**: Support generic OAuth2 providers (including self-hosted). - **v1.16**: Support "Swarm" resource - Manage docker swarms, attach Deployments / Stacks to "Swarm". - **v1.17+**: Support "Cluster" resource - Manage Kubernetes cluster, can attach deployments to "Cluster" (in addition to existing "Server") diff --git a/runfile.toml b/runfile.toml index 7988201d2..5566b06ae 100644 --- a/runfile.toml +++ b/runfile.toml @@ -19,20 +19,17 @@ cmd = "yarn build" description = "deploys test.compose.yaml" cmd = """ docker compose -f test.compose.yaml down && \ -docker compose -f test.compose.yaml up --attach monitor-periphery""" +docker compose -f test.compose.yaml up -d""" + +[test-compose-build] +description = "builds and deploys test.compose.yaml" +cmd = """ +docker compose -f test.compose.yaml down && \ +docker compose -f test.compose.yaml up -d --build""" [test-core] description = "runs core --release pointing to test.core.config.toml" -cmd = "MONITOR_CONFIG_PATH=test.core.config.toml cargo run -p monitor_core --release" - -[update-periphery] -path = "." 
-cmd = """ -cargo build -p monitor_periphery --release && \ -systemctl stop periphery && \ -cp ./target/release/periphery /usr/local/bin/periphery && \ -chmod +x /usr/local/bin/periphery && \ -systemctl start periphery""" +cmd = "KOMODO_CONFIG_PATH=test.core.config.toml cargo run -p komodo_core --release" [docsite-start] path = "docsite" @@ -43,4 +40,4 @@ path = "docsite" cmd = "yarn deploy" [rustdoc-server] -cmd = "cargo watch -s 'cargo doc --no-deps -p monitor_client' & http --quiet target/doc" \ No newline at end of file +cmd = "cargo watch -s 'cargo doc --no-deps -p komodo_client' & http --quiet target/doc" \ No newline at end of file diff --git a/scripts/readme.md b/scripts/readme.md index b7c86e7dd..9234fc029 100644 --- a/scripts/readme.md +++ b/scripts/readme.md @@ -1,8 +1,8 @@ # Periphery setup script -These scripts will set up Monitor Periphery on your hosts, managed by systemd. +These scripts will set up Komodo Periphery on your hosts, managed by systemd. -*Note*. This script can be run multiple times without issue, and it won't change existing config after the first run. Just run it again after a Monitor version release, and it will update the periphery version. +*Note*. This script can be run multiple times without issue, and it won't change existing config after the first run. Just run it again after a Komodo version release, and it will update the periphery version. *Note*. The script can usually detect aarch64 system and use the periphery-aarch64 binary. @@ -13,26 +13,26 @@ There's two ways to install periphery: `System` and `User` Note. Run this after switching to root user (eg `sudo su -`). ```sh -curl -sSL https://raw.githubusercontent.com/mbecker20/monitor/main/scripts/setup-periphery.py | python3 +curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py | python3 ``` Will install to paths: - periphery (binary) -> `/usr/local/bin/periphery` - periphery.service -> `/etc/systemd/system/periphery.service` -- periphery.config.toml -> `/etc/monitor/periphery.config.toml` +- periphery.config.toml -> `/etc/komodo/periphery.config.toml` ## User *Note*. The user running periphery must be a member of the docker group, in order to use the docker cli without sudo. -*Note*. Ensure the user running periphery has write access to the configure [repo directory](https://github.com/mbecker20/monitor/blob/5f0a9ad65228a5c43bdcdfd5335c6e4bf2112591/config_example/periphery.config.example.toml#L5). +*Note*. Ensure the user running periphery has write access to the configured [repo directory](https://github.com/mbecker20/komodo/blob/5f0a9ad65228a5c43bdcdfd5335c6e4bf2112591/config_example/periphery.config.example.toml#L5). This allows periphery to clone repos and write compose files.
```sh -curl -sSL https://raw.githubusercontent.com/mbecker20/monitor/main/scripts/setup-periphery.py | python3 - --user +curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py | python3 - --user ``` Will install to paths: - periphery (binary) -> $HOME/.local/bin - periphery.service -> $HOME/.config/systemd/user/periphery.service -- periphery.config.toml -> $HOME/.config/monitor/periphery.config.toml \ No newline at end of file +- periphery.config.toml -> $HOME/.config/komodo/periphery.config.toml \ No newline at end of file diff --git a/scripts/setup-periphery.py b/scripts/setup-periphery.py index d38152122..17adfdf70 100644 --- a/scripts/setup-periphery.py +++ b/scripts/setup-periphery.py @@ -14,7 +14,7 @@ def load_version(): return version def load_latest_version(): - return json.load(urllib.request.urlopen("https://api.github.com/repos/mbecker20/monitor/releases/latest"))["tag_name"] + return json.load(urllib.request.urlopen("https://api.github.com/repos/mbecker20/komodo/releases/latest"))["tag_name"] def load_paths(): # Checks if setup.py is passed --user arg @@ -22,21 +22,21 @@ def load_paths(): if user_install: home_dir = os.environ['HOME'] return [ - user_install, + True, # binary location f'{home_dir}/.local/bin', # config location - f'{home_dir}/.config/monitor', + f'{home_dir}/.config/komodo', # service file location f'{home_dir}/.config/systemd/user', ] else: return [ - user_install, + False, # binary location "/usr/local/bin", # config location - "/etc/monitor", + "/etc/komodo", # service file location "/etc/systemd/system", ] @@ -64,7 +64,7 @@ def copy_binary(user_install, bin_dir, version): periphery_bin = "periphery-aarch64" # download the binary to bin path - print(os.popen(f'curl -sSL https://github.com/mbecker20/monitor/releases/download/{version}/{periphery_bin} > {bin_path}').read()) + print(os.popen(f'curl -sSL https://github.com/mbecker20/komodo/releases/download/{version}/{periphery_bin} > {bin_path}').read()) # add executable permissions os.popen(f'chmod +x {bin_path}') @@ -83,13 +83,15 @@ def copy_config(config_dir): if not os.path.isdir(config_dir): os.makedirs(config_dir) - print(os.popen(f'curl -sSL https://raw.githubusercontent.com/mbecker20/monitor/main/config_example/periphery.config.example.toml > {config_dir}/periphery.config.toml').read()) + print(os.popen(f'curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/config_example/periphery.config.example.toml > {config_dir}/periphery.config.toml').read()) -def copy_service_file(bin_dir, config_dir, service_dir): +def copy_service_file(bin_dir, config_dir, service_dir, user_install): service_file = f'{service_dir}/periphery.service' + force_service_recopy = sys.argv.count("--force-service-file") > 0 + # early return is service file already exists - if os.path.isfile(service_file): + if not force_service_recopy and os.path.isfile(service_file): print("service file already exists, skipping...") return @@ -102,7 +104,7 @@ def copy_service_file(bin_dir, config_dir, service_dir): f = open(service_file, "x") f.write(( "[Unit]\n" - "Description=agent to connect with monitor core\n" + "Description=agent to connect with Komodo Core\n" "\n" "[Service]\n" f'ExecStart={bin_dir}/periphery --config-path {config_dir}/periphery.config.toml\n' @@ -112,6 +114,11 @@ def copy_service_file(bin_dir, config_dir, service_dir): "[Install]\n" "WantedBy=default.target" )) + + user = "" + if user_install: + user = " --user" + os.popen(f'systemctl{user} daemon-reload') def main(): 
print("=====================") @@ -127,9 +134,13 @@ def main(): print(f'config dir: {config_dir}') print(f'service file dir: {service_dir}') + force_service_recopy = sys.argv.count("--force-service-file") > 0 + if force_service_recopy: + print('forcing service file rewrite') + copy_binary(user_install, bin_dir, version) copy_config(config_dir) - copy_service_file(bin_dir, config_dir, service_dir) + copy_service_file(bin_dir, config_dir, service_dir, user_install) user = "" if user_install: diff --git a/test.compose.yaml b/test.compose.yaml index d232d32b9..681c49a71 100644 --- a/test.compose.yaml +++ b/test.compose.yaml @@ -1,38 +1,65 @@ services: - monitor-periphery: + # komodo-core: + # build: + # context: . + # dockerfile: bin/core/Dockerfile + # restart: unless-stopped + # depends_on: + # - komodo-mongo + # logging: + # driver: local # enable log rotation by default. see `https://docs.docker.com/config/containers/logging/local/` + # network_mode: host + # environment: # https://github.com/mbecker20/komodo/blob/main/config_example/core.config.example.toml + # KOMODO_HOST: http://localhost:9120 + # KOMODO_TITLE: Komodo Test + # KOMODO_ENSURE_SERVER: http://localhost:8120 # Creates the "default" server. + # ## MONGO + # KOMODO_MONGO_ADDRESS: localhost:27017 + # ## KEYS + # KOMODO_PASSKEY: a_random_passkey # used to auth against periphery + # KOMODO_WEBHOOK_SECRET: a_random_secret # used to authenticate incoming webhooks + # KOMODO_JWT_SECRET: a_random_jwt_secret # Optional. If empty, will have to log in again on restart. + # ## AUTH + # KOMODO_LOCAL_AUTH: true + # KOMODO_JWT_TTL: 2-wk + # ## POLLING + # KOMODO_MONITORING_INTERVAL: 5-sec + # KOMODO_STACK_POLL_INTERVAL: 1-min + # KOMODO_SYNC_POLL_INTERVAL: 1-min + # KOMODO_BUILD_POLL_INTERVAL: 1-min + # KOMODO_REPO_POLL_INTERVAL: 1-min + + komodo-periphery: build: context: . dockerfile: bin/periphery/Dockerfile + restart: unless-stopped logging: driver: local - networks: - - monitor-network - ports: - - 8120:8120 + network_mode: host volumes: - /var/run/docker.sock:/var/run/docker.sock - - monitor-repos:/etc/monitor/repos + - komodo-repos:/etc/komodo/repos # manage repos in a docker volume, or change it to an accessible host directory. + - komodo-stacks:/etc/komodo/stacks # manage stacks in a docker volume, or change it to an accessible host directory. # environment: # # If the disk size is overreporting, can use one of these to # # whitelist / blacklist the disks to filter them, whichever is easier. - # PERIPHERY_INCLUDE_DISK_MOUNTS: /etc/monitor/repos + # PERIPHERY_INCLUDE_DISK_MOUNTS: /etc/komodo/repos # PERIPHERY_EXCLUDE_DISK_MOUNTS: /snap - monitor-mongo: + komodo-mongo: image: mongo + command: --quiet # suppress mongo logs a bit restart: unless-stopped logging: - driver: none - networks: - - monitor-network - ports: - - 27017:27017 + driver: local + network_mode: host volumes: - db-data:/data/db + - db-config:/data/configdb volumes: db-data: - monitor-repos: - -networks: - monitor-network: {} \ No newline at end of file + db-config: + komodo-repos: + komodo-stacks: \ No newline at end of file diff --git a/test.core.config.toml b/test.core.config.toml index a888c8aa6..0082f7012 100644 --- a/test.core.config.toml +++ b/test.core.config.toml @@ -1,7 +1,8 @@ -title = "Test" -host = "http://localhost.9120" +title = "Komodo Test" +host = "http://localhost:9120" passkey = "a_random_passkey" ensure_server = "http://localhost:8120" +repo_directory = "/Users/max/komodo-repos" ############ # DATABASE #