mirror of https://github.com/moghtech/komodo.git (synced 2025-12-05 19:17:36 -06:00)
delete everything for next rewrite
@@ -1,4 +0,0 @@
/target
/config_example
config.*
.env

12 .gitignore vendored
@@ -1,12 +0,0 @@
target
/frontend/build
node_modules
dist
.env
.env.development

repos
config.json
config.toml
secrets.json
secrets.toml

64 .vscode/solid.code-snippets vendored
@@ -1,64 +0,0 @@
{
  "component": {
    "scope": "typescriptreact,javascriptreact",
    "prefix": "comp",
    "body": [
      "import { Component } from \"solid-js\";",
      "",
      "const ${1:$TM_FILENAME_BASE}: Component<{}> = (p) => {",
      "\treturn (",
      "\t\t<div>",
      "\t\t\t${0}",
      "\t\t</div>",
      "\t);",
      "}",
      "",
      "export default ${1:$TM_FILENAME_BASE};"
    ]
  },
  "component-with-css": {
    "scope": "typescriptreact,javascriptreact",
    "prefix": "css-comp",
    "body": [
      "import { Component } from \"solid-js\";",
      "import s from \"./${1:$TM_FILENAME_BASE}.module.scss\";",
      "",
      "const ${2:$TM_FILENAME_BASE}: Component<{}> = (p) => {",
      "\treturn (",
      "\t\t<div class={s.${2:$TM_FILENAME_BASE}} >",
      "\t\t\t${0}",
      "\t\t</div>",
      "\t);",
      "}",
      "",
      "export default ${2:$TM_FILENAME_BASE};"
    ]
  },
  "context": {
    "scope": "typescriptreact,javascriptreact",
    "prefix": "provider",
    "body": [
      "import { ParentComponent, createContext, useContext } from \"solid-js\";",
      "",
      "const value = () => {",
      "\treturn {};",
      "}",
      "",
      "export type Value = ReturnType<typeof value>;",
      "",
      "const context = createContext<Value>();",
      "",
      "export const Provider: ParentComponent<{}> = (p) => {",
      "\treturn (",
      "\t\t<context.Provider value={value()}>",
      "\t\t\t{p.children}",
      "\t\t</context.Provider>",
      "\t);",
      "}",
      "",
      "export function useValue() {",
      "\treturn useContext(context) as Value;",
      "}"
    ]
  }
}

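For reference, triggering the comp prefix in a hypothetical file named Button.tsx would expand to the following ($TM_FILENAME_BASE picks up the file's base name, \t is rendered here as two spaces, and ${0} marks the final cursor position inside the div):

import { Component } from "solid-js";

const Button: Component<{}> = (p) => {
  return (
    <div>

    </div>
  );
}

export default Button;
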
164 .vscode/tasks.json vendored
@@ -1,164 +0,0 @@
{
  "version": "2.0.0",
  "tasks": [
    {
      "type": "cargo",
      "command": "build",
      "group": {
        "kind": "build",
        "isDefault": true
      },
      "label": "rust: cargo build"
    },
    {
      "type": "cargo",
      "command": "fmt",
      "label": "rust: cargo fmt"
    },
    {
      "type": "cargo",
      "command": "check",
      "label": "rust: cargo check"
    },
    {
      "label": "start dev",
      "dependsOn": [
        "run core",
        "yarn: start frontend"
      ],
      "problemMatcher": []
    },
    {
      "type": "shell",
      "command": "yarn start",
      "label": "yarn: start frontend",
      "options": {
        "cwd": "${workspaceFolder}/frontend"
      },
      "presentation": {
        "group": "start"
      }
    },
    {
      "type": "cargo",
      "command": "run",
      "label": "run core",
      "options": {
        "cwd": "${workspaceFolder}/core"
      },
      "presentation": {
        "group": "start"
      }
    },
    {
      "type": "cargo",
      "command": "run",
      "label": "run periphery",
      "options": {
        "cwd": "${workspaceFolder}/periphery"
      }
    },
    {
      "type": "shell",
      "command": "cargo install --path . && if pgrep periphery; then pkill periphery; fi && periphery --daemon --config-path ~/.monitor/local.periphery.config.toml",
      "label": "run periphery daemon",
      "options": {
        "cwd": "${workspaceFolder}/periphery"
      },
      "problemMatcher": []
    },
    {
      "type": "cargo",
      "command": "run",
      "label": "run cli",
      "options": {
        "cwd": "${workspaceFolder}/cli"
      }
    },
    {
      "type": "cargo",
      "command": "run",
      "label": "run tests",
      "options": {
        "cwd": "${workspaceFolder}/tests"
      }
    },
    {
      "type": "cargo",
      "command": "publish",
      "args": ["--allow-dirty"],
      "label": "publish monitor types",
      "options": {
        "cwd": "${workspaceFolder}/lib/types"
      }
    },
    {
      "type": "cargo",
      "command": "publish",
      "label": "publish monitor client",
      "options": {
        "cwd": "${workspaceFolder}/lib/monitor_client"
      }
    },
    {
      "type": "shell",
      "command": "docker compose up -d",
      "label": "docker compose up",
      "options": {
        "cwd": "${workspaceFolder}/tests"
      },
      "problemMatcher": []
    },
    {
      "type": "shell",
      "command": "docker compose down",
      "label": "docker compose down",
      "options": {
        "cwd": "${workspaceFolder}/tests"
      },
      "problemMatcher": []
    },
    {
      "type": "shell",
      "command": "docker compose build",
      "label": "docker compose build",
      "options": {
        "cwd": "${workspaceFolder}/tests"
      },
      "problemMatcher": []
    },
    {
      "type": "shell",
      "command": "docker compose down && docker compose up -d",
      "label": "docker compose restart",
      "options": {
        "cwd": "${workspaceFolder}/tests"
      },
      "problemMatcher": []
    },
    {
      "type": "shell",
      "command": "docker compose build && docker compose down && docker compose up -d",
      "label": "docker compose build and restart",
      "options": {
        "cwd": "${workspaceFolder}/tests"
      },
      "problemMatcher": []
    },
    {
      "type": "shell",
      "command": "docker compose build periphery",
      "label": "docker compose build periphery",
      "options": {
        "cwd": "${workspaceFolder}/tests"
      },
      "problemMatcher": []
    },
    {
      "type": "shell",
      "command": "typeshare ./lib/types --lang=typescript --output-file=./frontend/src/types.ts && typeshare ./core --lang=typescript --output-file=./frontend/src/util/client_types.ts",
      "label": "generate typescript types",
      "problemMatcher": []
    }
  ]
}

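The compound "start dev" task above just runs its two dependencies in parallel, so it is roughly equivalent to launching these from the workspace root (working directories taken from the tasks' options):

(cd core && cargo run)        # task "run core"
(cd frontend && yarn start)   # task "yarn: start frontend"
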
3951 Cargo.lock generated
File diff suppressed because it is too large
14 Cargo.toml
@@ -1,14 +0,0 @@
[workspace]

members = [
  "cli",
  "core",
  "periphery",
  "tests",
  "lib/axum_oauth2",
  "lib/db_client",
  "lib/helpers",
  "lib/periphery_client",
  "lib/types",
  "lib/monitor_client"
]

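Since this is a plain virtual workspace, individual members build from the root with cargo's -p flag, for example (using the monitor_cli package name from the cli manifest further down):

cargo build -p monitor_cli
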
674 LICENSE
@@ -1,674 +0,0 @@
                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU General Public License is a free, copyleft license for
software and other kinds of works.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.

  Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.

  For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.

  Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.

  Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7. This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy. This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged. This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source. This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge. You need not require recipients to copy the
    Corresponding Source along with the object code. If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source. Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

  A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

  A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

  "Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

  If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

  The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

  Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

  7. Additional Terms.

  "Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

  When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

  Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

  All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

  If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

  Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

  8. Termination.

  You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

  However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

  Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

  Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

  9. Acceptance Not Required for Having Copies.

  You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

  10. Automatic Licensing of Downstream Recipients.

  Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.

  An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

  You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

  11. Patents.

  A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".

  A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

  Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

  In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

  If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

  If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

  A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

  Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

  12. No Surrender of Others' Freedom.

  If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

  13. Use with the GNU Affero General Public License.

  Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.

  14. Revised Versions of this License.

  The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

  Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.

  If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

  Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

  If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year> <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

  If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

    <program> Copyright (C) <year> <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

  You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.

  The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

@@ -1,26 +0,0 @@
[package]
name = "monitor_cli"
version = "0.3.4"
edition = "2021"
authors = ["MoghTech"]
description = "monitor cli | tools to setup monitor system"
license = "GPL-3.0-or-later"

[[bin]]
name = "monitor"
path = "src/main.rs"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
monitor_types = { path = "../lib/types" }
clap = "4.0"
async_timing_util = "0.1.14"
rand = "0.8"
serde = "1.0"
serde_derive = "1.0"
toml = "0.7"
run_command = "0.0.5"
colored = "2"
strum = "0.24"
strum_macros = "0.24"

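Note the [[bin]] section: the crate is named monitor_cli, but the installed binary is monitor. Assuming the crate is published to crates.io (the install_periphery_from_crates_io helper referenced further down suggests the companion crates are), installation would look like:

cargo install monitor_cli
monitor --help
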
@@ -1,621 +0,0 @@
use std::{
    env,
    fs::{self, File},
    io::{Read, Write},
    net::IpAddr,
    path::PathBuf,
    str::FromStr,
};

use clap::ArgMatches;
use colored::Colorize;
use monitor_types::{CoreConfig, MongoConfig, PeripheryConfig, RestartMode, Timelength};
use rand::{distributions::Alphanumeric, Rng};
use run_command::run_command_pipe_to_terminal;
use serde::Serialize;

const CORE_IMAGE_NAME: &str = "mbecker2020/monitor_core";
const PERIPHERY_IMAGE_NAME: &str = "mbecker2020/monitor_periphery";
const PERIPHERY_CRATE: &str = "monitor_periphery";

pub fn gen_core_config(sub_matches: &ArgMatches) {
    let host = sub_matches
        .get_one::<String>("host")
        .map(|p| p.as_str())
        .unwrap_or("http://localhost:9000")
        .to_string();

    let path = sub_matches
        .get_one::<String>("path")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/core.config.toml")
        .to_string();

    let port = sub_matches
        .get_one::<String>("port")
        .map(|p| p.as_str())
        .unwrap_or("9000")
        .parse::<u16>()
        .expect("invalid port");

    let mongo_uri = sub_matches
        .get_one::<String>("mongo-uri")
        .map(|p| p.as_str())
        .unwrap_or("mongodb://monitor-mongo")
        .to_string();

    let mongo_db_name = sub_matches
        .get_one::<String>("mongo-db-name")
        .map(|p| p.as_str())
        .unwrap_or("monitor")
        .to_string();

    let jwt_valid_for = sub_matches
        .get_one::<String>("jwt-valid-for")
        .map(|p| p.as_str())
        .unwrap_or("1-wk")
        .parse()
        .expect("invalid jwt-valid-for");

    let slack_url = sub_matches
        .get_one::<String>("slack-url")
        .map(|p| p.to_owned());

    let config = CoreConfig {
        title: String::from("monitor"),
        host,
        port,
        jwt_valid_for,
        monitoring_interval: Timelength::OneMinute,
        daily_offset_hours: 0,
        keep_stats_for_days: 120,
        slack_url,
        local_auth: true,
        github_oauth: Default::default(),
        google_oauth: Default::default(),
        aws: Default::default(),
        docker_organizations: Default::default(),
        mongo: MongoConfig {
            uri: mongo_uri,
            db_name: mongo_db_name,
            app_name: "monitor".to_string(),
        },
        jwt_secret: generate_secret(40),
        github_webhook_secret: generate_secret(30),
        github_webhook_base_url: None,
        passkey: generate_secret(30),
    };

    write_to_toml(&path, &config);

    println!(
        "\n✅ {} has been generated at {path} ✅\n",
        "core config".bold()
    );
}

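gen_core_config leans on two helpers that live elsewhere in this file but are not part of this extract: generate_secret and write_to_toml. Given the imports at the top (rand's Alphanumeric and Rng, serde's Serialize, toml, File and Write), plausible sketches look like the following — hypothetical reconstructions under those assumptions, not the verbatim originals:

// Hypothetical reconstruction: random alphanumeric secret of the given length,
// using the Alphanumeric distribution imported at the top of the file.
fn generate_secret(length: usize) -> String {
    rand::thread_rng()
        .sample_iter(&Alphanumeric)
        .take(length)
        .map(char::from)
        .collect()
}

// Hypothetical reconstruction: serialize any Serialize type to TOML and write it
// to the given path, expanding a leading '~' to $HOME as the call sites expect.
fn write_to_toml(path: &str, config: &impl Serialize) {
    let path = path.replace('~', &env::var("HOME").expect("failed to find $HOME env var"));
    let mut file = File::create(&path).expect("failed to create config file");
    file.write_all(
        toml::to_string(config)
            .expect("failed to serialize config")
            .as_bytes(),
    )
    .expect("failed to write config file");
}
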
pub fn start_mongo(sub_matches: &ArgMatches) {
    let username = sub_matches.get_one::<String>("username");
    let password = sub_matches.get_one::<String>("password");

    if username.is_some() && password.is_none() {
        println!(
            "\n❌ must provide {} if username is provided ❌\n",
            "--password".bold()
        );
        return;
    }
    if username.is_none() && password.is_some() {
        println!(
            "\n❌ must provide {} if password is provided ❌\n",
            "--username".bold()
        );
        return;
    }

    let skip_enter = *sub_matches.get_one::<bool>("yes").unwrap_or(&false);

    let name = sub_matches
        .get_one::<String>("name")
        .map(|p| p.as_str())
        .unwrap_or("monitor-mongo");

    let port = sub_matches
        .get_one::<String>("port")
        .map(|p| p.as_str())
        .unwrap_or("27017")
        .parse::<u16>()
        .expect("invalid port");

    let network = sub_matches
        .get_one::<String>("network")
        .map(|p| p.as_str())
        .unwrap_or("bridge");

    let mount = sub_matches
        .get_one::<String>("mount")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/db");

    let restart = sub_matches
        .get_one::<String>("restart")
        .map(|p| p.as_str())
        .unwrap_or("unless-stopped")
        .parse::<RestartMode>()
        .expect("invalid restart mode");

    let env = if let (Some(username), Some(password)) = (username, password) {
        format!(" --env MONGO_INITDB_ROOT_USERNAME={username} --env MONGO_INITDB_ROOT_PASSWORD={password}")
    } else {
        String::new()
    };

    println!(
        "\n====================\n {} \n====================\n",
        "mongo config".bold()
    );
    if let Some(username) = username {
        println!("{}: {username}", "mongo username".dimmed());
    }
    println!("{}: {name}", "container name".dimmed());
    println!("{}: {port}", "port".dimmed());
    println!("{}: {mount}", "mount".dimmed());
    println!("{}: {network}", "network".dimmed());
    println!("{}: {restart}", "restart".dimmed());

    if !skip_enter {
        println!(
            "\npress {} to start {}. {}",
            "ENTER".green().bold(),
            "MongoDB".bold(),
            "(ctrl-c to cancel)".dimmed()
        );

        let buffer = &mut [0u8];
        let res = std::io::stdin().read_exact(buffer);

        if res.is_err() {
            println!("pressed another button, exiting");
        }
    }

    let _ =
        run_command_pipe_to_terminal(&format!("docker stop {name} && docker container rm {name}"));

    let command = format!("docker run -d --name {name} -p {port}:27017 --network {network} -v {mount}:/data/db{env} --restart {restart} --log-opt max-size=15m --log-opt max-file=3 mongo --quiet");

    let output = run_command_pipe_to_terminal(&command);

    if output.success() {
        println!("\n✅ {} has been started up ✅\n", "monitor mongo".bold())
    } else {
        eprintln!("\n❌ there was some {} on startup ❌\n", "error".red())
    }
}

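With every flag left at its default (no credentials, so env is empty), the docker run command assembled above comes out as the following — assuming RestartMode's Display impl renders the docker string "unless-stopped", which its FromStr usage suggests but this extract does not show:

docker run -d --name monitor-mongo -p 27017:27017 --network bridge -v ~/.monitor/db:/data/db --restart unless-stopped --log-opt max-size=15m --log-opt max-file=3 mongo --quiet
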
pub fn start_core(sub_matches: &ArgMatches) {
    let skip_enter = *sub_matches.get_one::<bool>("yes").unwrap_or(&false);

    let config_path = sub_matches
        .get_one::<String>("config-path")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/core.config.toml")
        .to_string();

    let name = sub_matches
        .get_one::<String>("name")
        .map(|p| p.as_str())
        .unwrap_or("monitor-core");

    let port = sub_matches
        .get_one::<String>("port")
        .map(|p| p.as_str())
        .unwrap_or("9000")
        .parse::<u16>()
        .expect("invalid port");

    let network = sub_matches
        .get_one::<String>("network")
        .map(|p| p.as_str())
        .unwrap_or("bridge");

    let restart = sub_matches
        .get_one::<String>("restart")
        .map(|p| p.as_str())
        .unwrap_or("unless-stopped")
        .parse::<RestartMode>()
        .expect("invalid restart mode");

    let add_host = sub_matches
        .get_one::<bool>("add-internal-host")
        .map(|p| *p)
        .unwrap_or(true);

    println!(
        "\n===================\n {} \n===================\n",
        "core config".bold()
    );
    println!("{}: {name}", "container name".dimmed());
    println!("{}: {config_path}", "config path".dimmed());
    println!("{}: {port}", "port".dimmed());
    println!("{}: {network}", "network".dimmed());
    println!("{}: {restart}", "restart".dimmed());
    println!("{}: {add_host}", "add internal host".dimmed());

    if !skip_enter {
        println!(
            "\npress {} to start {}. {}",
            "ENTER".green().bold(),
            "monitor core".bold(),
            "(ctrl-c to cancel)".dimmed()
        );

        let buffer = &mut [0u8];
        let res = std::io::stdin().read_exact(buffer);

        if res.is_err() {
            println!("pressed another button, exiting");
        }
    }

    println!("\nstarting monitor core container...\n");

    let _ = run_command_pipe_to_terminal(&format!("docker pull {CORE_IMAGE_NAME}"));

    let _ =
        run_command_pipe_to_terminal(&format!("docker stop {name} && docker container rm {name}"));

    let add_host = if add_host {
        " --add-host host.docker.internal:host-gateway"
    } else {
        ""
    };

    let command = format!("docker run -d --name {name} -p {port}:9000 --network {network} -v {config_path}:/config/config.toml --restart {restart}{add_host} {CORE_IMAGE_NAME}");

    let output = run_command_pipe_to_terminal(&command);

    if output.success() {
        println!("\n✅ {} has been started up ✅\n", "monitor core".bold())
    } else {
        eprintln!("\n❌ there was some {} on startup ❌\n", "error".red())
    }
}

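Again with defaults (and the same caveat about RestartMode's Display impl), the assembled command is:

docker run -d --name monitor-core -p 9000:9000 --network bridge -v ~/.monitor/core.config.toml:/config/config.toml --restart unless-stopped --add-host host.docker.internal:host-gateway mbecker2020/monitor_core
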
pub fn gen_periphery_config(sub_matches: &ArgMatches) {
    let path = sub_matches
        .get_one::<String>("path")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/periphery.config.toml")
        .to_string();

    let port = sub_matches
        .get_one::<String>("port")
        .map(|p| p.as_str())
        .unwrap_or("8000")
        .parse::<u16>()
        .expect("invalid port");

    let stats_polling_rate = sub_matches
        .get_one::<String>("stats-polling-rate")
        .map(|p| p.as_str())
        .unwrap_or("5-sec")
        .parse::<Timelength>()
        .expect("invalid timelength");

    let allowed_ips = sub_matches
        .get_one::<String>("allowed-ips")
        .map(|p| p.as_str())
        .unwrap_or("")
        .split(',')
        .filter(|ip| !ip.is_empty())
        .map(|ip| {
            ip.parse()
                .expect("given allowed ip address is not valid ip")
        })
        .collect::<Vec<IpAddr>>();

    let repo_dir = sub_matches
        .get_one::<String>("repo-dir")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/repos")
        .to_string()
        .replace('~', env::var("HOME").unwrap().as_str())
        .parse()
        .expect("failed to parse --repo-dir as path");

    let config = PeripheryConfig {
        port,
        repo_dir,
        stats_polling_rate,
        allowed_ips,
        passkeys: vec![],
        secrets: Default::default(),
        github_accounts: Default::default(),
        docker_accounts: Default::default(),
    };

    write_to_toml(&path, &config);

    println!(
        "\n✅ {} generated at {path} ✅\n",
        "periphery config".bold()
    );
}

pub fn start_periphery_systemd(sub_matches: &ArgMatches) {
    let skip_enter = *sub_matches.get_one::<bool>("yes").unwrap_or(&false);

    let install = *sub_matches.get_one::<bool>("install").unwrap_or(&false);

    let config_path = sub_matches
        .get_one::<String>("config-path")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/periphery.config.toml")
        .to_string();

    println!(
        "\n========================\n {} \n========================\n",
        "periphery config".bold()
    );
    println!("{}: systemd", "run with".dimmed());
    println!("{}: {config_path}", "config path".dimmed());

    if !skip_enter {
        println!(
            "\npress {} to start {}. {}",
            "ENTER".green().bold(),
            "monitor periphery".bold(),
            "(ctrl-c to cancel)".dimmed()
        );

        let buffer = &mut [0u8];
        let res = std::io::stdin().read_exact(buffer);

        if res.is_err() {
            println!("pressed another button, exiting");
            // the message promises an exit, so actually bail out here
            std::process::exit(1);
        }
    }

    if install {
        install_periphery_from_crates_io();
    }

    gen_periphery_service_file(&config_path);

    let user = env::var("USER").expect("failed to find $USER env var");

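    // `loginctl enable-linger` lets this user-level unit keep running after
    // the user logs out; without lingering, systemd would stop periphery as
    // soon as the login session ends.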
    let command =
        format!("systemctl --user daemon-reload && systemctl --user enable --now periphery && loginctl enable-linger {user}");

    let output = run_command_pipe_to_terminal(&command);

    if output.success() {
        println!(
            "\n✅ {} has been started up ✅\n",
            "monitor periphery".bold()
        )
    } else {
        eprintln!("\n❌ there was some {} on startup ❌\n", "error".red())
    }
}

pub fn start_periphery_daemon(sub_matches: &ArgMatches) {
    let skip_enter = *sub_matches.get_one::<bool>("yes").unwrap_or(&false);

    let install = *sub_matches.get_one::<bool>("install").unwrap_or(&false);

    let config_path = sub_matches
        .get_one::<String>("config-path")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/periphery.config.toml")
        .to_string();

    let stdout = sub_matches
        .get_one::<String>("stdout")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/periphery.log.out")
        .to_string();

    let stderr = sub_matches
        .get_one::<String>("stderr")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/periphery.log.err")
        .to_string();

    println!(
        "\n========================\n {} \n========================\n",
        "periphery config".bold()
    );
    println!("{}: daemon", "run as".dimmed());
    println!("{}: {config_path}", "config path".dimmed());
    println!("{}: {stdout}", "stdout".dimmed());
    println!("{}: {stderr}", "stderr".dimmed());

    if !skip_enter {
        println!(
            "\npress {} to start {}. {}",
            "ENTER".green().bold(),
            "monitor periphery".bold(),
            "(ctrl-c to cancel)".dimmed()
        );

        let buffer = &mut [0u8];
        let res = std::io::stdin().read_exact(buffer);

        if res.is_err() {
            println!("pressed another button, exiting");
            // the message promises an exit, so actually bail out here
            std::process::exit(1);
        }
    }

    if install {
        install_periphery_from_crates_io();
    }

    let command = format!("if pgrep periphery; then pkill periphery; fi && periphery --daemon --config-path {config_path} --stdout {stdout} --stderr {stderr}");
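    // the `if pgrep ... fi` guard above replaces any periphery process that
    // is already running, without failing the chained command when none
    // exists yet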
    let output = run_command_pipe_to_terminal(&command);

    if output.success() {
        println!(
            "\n✅ {} has been started up ✅\n",
            "monitor periphery".bold()
        )
    } else {
        eprintln!("\n❌ there was some {} on startup ❌\n", "error".red())
    }
}

pub fn start_periphery_container(sub_matches: &ArgMatches) {
    let skip_enter = *sub_matches.get_one::<bool>("yes").unwrap_or(&false);

    let config_path = sub_matches
        .get_one::<String>("config-path")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/periphery.config.toml")
        .to_string();

    let repo_dir = sub_matches
        .get_one::<String>("repo-dir")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/repos")
        .to_string();

    let name = sub_matches
        .get_one::<String>("name")
        .map(|p| p.as_str())
        .unwrap_or("monitor-periphery");

    let port = sub_matches
        .get_one::<String>("port")
        .map(|p| p.as_str())
        .unwrap_or("8000")
        .parse::<u16>()
        .expect("invalid port");

    let network = sub_matches
        .get_one::<String>("network")
        .map(|p| p.as_str())
        .unwrap_or("bridge");

    let restart = sub_matches
        .get_one::<String>("restart")
        .map(|p| p.as_str())
        .unwrap_or("unless-stopped")
        .parse::<RestartMode>()
        .expect("invalid restart mode");

    println!(
        "\n========================\n {} \n========================\n",
        "periphery config".bold()
    );
    println!("{}: container", "run as".dimmed());
    println!("{}: {name}", "container name".dimmed());
    println!("{}: {config_path}", "config path".dimmed());
    println!("{}: {repo_dir}", "repo folder".dimmed());
    println!("{}: {port}", "port".dimmed());
    println!("{}: {network}", "network".dimmed());
    println!("{}: {restart}", "restart".dimmed());

    if !skip_enter {
        println!(
            "\npress {} to start {}. {}",
            "ENTER".green().bold(),
            "monitor periphery".bold(),
            "(ctrl-c to cancel)".dimmed()
        );

        let buffer = &mut [0u8];
        let res = std::io::stdin().read_exact(buffer);

        if res.is_err() {
            println!("pressed another button, exiting");
            // the message promises an exit, so actually bail out here
            std::process::exit(1);
        }
    }

    println!("\nstarting monitor periphery container...\n");

    let _ = run_command_pipe_to_terminal(&format!("docker pull {PERIPHERY_IMAGE_NAME}"));

    let _ =
        run_command_pipe_to_terminal(&format!("docker stop {name} && docker container rm {name}"));

    let command = format!("docker run -d --name {name} -p {port}:8000 --network {network} -v {config_path}:/config/config.toml -v {repo_dir}:/repos -v /var/run/docker.sock:/var/run/docker.sock --restart {restart} {PERIPHERY_IMAGE_NAME}");

    let output = run_command_pipe_to_terminal(&command);

    if output.success() {
        println!(
            "\n✅ {} has been started up ✅\n",
            "monitor periphery".bold()
        )
    } else {
        eprintln!("\n❌ there was some {} on startup ❌\n", "error".red())
    }
}

pub fn gen_periphery_service_file(config_path: &str) {
    let home = env::var("HOME").expect("failed to find $HOME env var");
    let _ = std::fs::create_dir_all(format!("{home}/.config/systemd/user"));
    let mut file = File::create(format!("{home}/.config/systemd/user/periphery.service"))
        .expect("failed to create user systemd unit file");
    file.write_all(periphery_unit_file(config_path).as_bytes())
        .expect("failed to write service file");
}

fn write_to_toml(path: &str, toml: impl Serialize) {
    let path = PathBuf::from_str(&path.replace('~', &std::env::var("HOME").unwrap()))
        .expect("not a valid path");
    let _ = fs::create_dir_all(pop_path(&path));
    fs::write(
        path,
        toml::to_string(&toml).expect("failed to serialize config into toml"),
    )
    .expect("❌ failed to write toml to file ❌");
}

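// returns `path` with its final component removed, i.e. the parent directory
// (equivalent to `path.parent()`); used above to make sure the target
// directory exists before writing the file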
fn pop_path(path: &PathBuf) -> PathBuf {
    let mut clone = path.clone();
    clone.pop();
    clone
}

fn generate_secret(length: usize) -> String {
    rand::thread_rng()
        .sample_iter(&Alphanumeric)
        .take(length)
        .map(char::from)
        .collect()
}

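// presumably used by the config generators earlier in this file to fill in
// fields like jwt_secret and passkey; e.g. `generate_secret(40)` returns a
// random 40-character alphanumeric string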
fn periphery_unit_file(config_path: &str) -> String {
    let home = env::var("HOME").expect("failed to find $HOME env var");
    let user = env::var("USER").expect("failed to find $USER env var");
    format!(
        "[Unit]
Description=agent to connect with monitor core

[Service]
ExecStart={home}/.monitor/bin/periphery --config-path {config_path} --home-dir {home}
Restart=on-failure
TimeoutStartSec=0

[Install]
WantedBy=default.target"
    )
}

fn install_periphery_from_crates_io() {
    println!("\ninstalling periphery binary...\n");

    let install_output = run_command_pipe_to_terminal(&format!("cargo install {PERIPHERY_CRATE}"));

    if install_output.success() {
        println!("\ninstallation finished, starting monitor periphery daemon\n");
    } else {
        panic!(
            "\n❌ there was some {} during periphery installation ❌\n",
            "error".red()
        )
    }
}

252
cli/src/main.rs
@@ -1,252 +0,0 @@
#![allow(unused)]

use clap::{arg, Arg, Command};

mod helpers;

use helpers::*;

fn cli() -> Command {
    Command::new("monitor")
        .about("\na cli to set up monitor components, like the periphery client")
        .version(env!("CARGO_PKG_VERSION"))
        .subcommand_required(true)
        .arg_required_else_help(true)
        .allow_external_subcommands(true)
        .subcommand(
            Command::new("core")
                .about("tools to set up monitor core")
                .subcommand_required(true)
                .arg_required_else_help(true)
                .allow_external_subcommands(true)
                .subcommand(
                    Command::new("gen-config")
                        .about("generate a core config file")
                        .arg(
                            arg!(--host <HOST> "the host to use with oauth redirect url, whatever host the user hits to access monitor. eg 'https://monitor.mogh.tech'")
                                .required(true)
                        )
                        .arg(
                            arg!(--path <PATH> "sets path of generated config file. default is '~/.monitor/core.config.toml'")
                                .required(false)
                        )
                        .arg(
                            arg!(--port <PORT> "sets port core will run on. default is 9000. if running in docker, keep this port as is, and set the external port when running the core start command")
                                .required(false)
                        )
                        .arg(
                            arg!(--"mongo-uri" <URI> "sets the mongo uri to use. default is 'mongodb://monitor-mongo'")
                                .required(false)
                        )
                        .arg(
                            arg!(--"mongo-db-name" <NAME> "sets the db name to use. default is 'monitor'")
                                .required(false)
                        )
                        .arg(
                            arg!(--"jwt-valid-for" <TIMELENGTH> "sets the length of time a jwt stays valid for. default is 1-wk (one week)")
                                .required(false)
                        )
                        .arg(
                            arg!(--"slack-url" <URL> "sets the slack url to use for slack notifications")
                                .required(false)
                        ),
                )
                .subcommand(
                    Command::new("start-mongo")
                        .about("start up a local mongo container for monitor core")
                        .arg(
                            arg!(--yes "used in scripts to skip 'enter to continue' step")
                        )
                        .arg(
                            arg!(--name <NAME> "specify the name of the mongo container. default is monitor-mongo")
                                .required(false)
                        )
                        .arg(
                            arg!(--username <USERNAME> "specify the admin username for mongo. default is mongo with no auth")
                                .required(false)
                        )
                        .arg(
                            arg!(--password <PASSWORD> "specify the admin password for mongo. default is mongo with no auth")
                                .required(false)
                        )
                        .arg(
                            arg!(--port <PORT> "sets port mongo will run on. default is 27017")
                                .required(false)
                        )
                        .arg(
                            arg!(--mount <PATH> "sets the path the mongo data is mounted into. default is ~/.monitor/db")
                                .required(false)
                        )
                        .arg(
                            arg!(--network <NETWORK> "sets docker network of mongo container. default is bridge")
                                .required(false)
                        )
                        .arg(
                            arg!(--restart <RESTART> "sets docker restart mode of mongo container. default is unless-stopped")
                        )
                )
                .subcommand(
                    Command::new("start")
                        .about("start up monitor core in container")
                        .arg(
                            arg!(--yes "used in scripts to skip 'enter to continue' step")
                        )
                        .arg(
                            arg!(--name <NAME> "specify the name of the monitor core container. default is monitor-core")
                        )
                        .arg(
                            arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/core.config.toml")
                                .required(false)
                        )
                        .arg(
                            arg!(--port <PORT> "sets port monitor core will run on. default is 9000")
                                .required(false)
                        )
                        .arg(
                            arg!(--network <NETWORK> "sets docker network of monitor core container. default is bridge")
                                .required(false)
                        )
                        .arg(
                            arg!(--restart <RESTART> "sets docker restart mode of monitor core container. default is unless-stopped")
                        )
                        .arg(
                            arg!(--"add-internal-host" "adds the docker flag '--add-host=host.docker.internal:host-gateway'. default is true")
                        )
                ),
        )
        .subcommand(
            Command::new("periphery")
                .about("tools to set up monitor periphery")
                .subcommand_required(true)
                .arg_required_else_help(true)
                .allow_external_subcommands(true)
                .subcommand(
                    Command::new("gen-config")
                        .about("generate a periphery config file")
                        .arg(
                            arg!(--path <PATH> "sets path of generated config file. default is '~/.monitor/periphery.config.toml'")
                                .required(false)
                        )
                        .arg(
                            arg!(--port <PORT> "sets port periphery will run on. default is 8000. if running in docker, keep this port as is, and set the external port when running the periphery start command")
                                .required(false)
                        )
                        .arg(
                            arg!(--"stats-polling-rate" <INTERVAL> "sets stats polling rate to control granularity of system stats returned. default is 5-sec. options: 1-sec, 5-sec, 10-sec, 30-sec, 1-min")
                                .required(false)
                        )
                        .arg(
                            arg!(--"allowed-ips" <IPS> "used to only accept requests from known ips. give ips as a comma separated list, like '--allowed-ips 127.0.0.1,10.20.30.43'. default is empty, which will not block any ip.")
                                .required(false)
                        )
                        .arg(
                            arg!(--"repo-dir" <PATH> "if running in container, this should be '/repos'. default is ~/.monitor/repos").required(false)
                        )
                )
                .subcommand(
                    Command::new("start")
                        .about("tools to start periphery as daemon or container")
                        .subcommand(
                            Command::new("systemd")
                                .about("manage periphery with systemd running under current user")
                                .arg(
                                    arg!(--yes "used in scripts to skip 'enter to continue' step")
                                )
                                .arg(
                                    arg!(--install "specify this to install periphery from crates.io")
                                )
                                .arg(
                                    arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
                                        .required(false)
                                )
                        )
                        .subcommand(
                            Command::new("daemon")
                                .about("start up monitor periphery daemon")
                                .arg(
                                    arg!(--yes "used in scripts to skip 'enter to continue' step")
                                )
                                .arg(
                                    arg!(--install "specify this to install periphery from crates.io")
                                )
                                .arg(
                                    arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
                                        .required(false)
                                )
                                .arg(
                                    arg!(--stdout <PATH> "specify the file path for periphery to log stdout to. default is ~/.monitor/periphery.log.out")
                                        .required(false)
                                )
                                .arg(
                                    arg!(--stderr <PATH> "specify the file path for periphery to log stderr to. default is ~/.monitor/periphery.log.err")
                                        .required(false)
                                )
                        )
                        // .subcommand(
                        //     Command::new("container")
                        //         .about("start up monitor periphery in docker container")
                        //         .arg(
                        //             arg!(--yes "used in scripts to skip 'enter to continue' step")
                        //         )
                        //         .arg(
                        //             arg!(--name <NAME> "specify the name of the monitor periphery container. default is monitor-periphery")
                        //         )
                        //         .arg(
                        //             arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
                        //                 .required(false)
                        //         )
                        //         .arg(arg!(--"repo-dir" <PATH> "specify the folder on host to clone repos into. default is ~/.monitor/repos").required(false))
                        //         .arg(
                        //             arg!(--port <PORT> "sets port monitor periphery will run on. default is 8000")
                        //                 .required(false)
                        //         )
                        //         .arg(
                        //             arg!(--network <NETWORK> "sets docker network of monitor periphery container. default is bridge")
                        //                 .required(false)
                        //         )
                        //         .arg(
                        //             arg!(--restart <RESTART> "sets docker restart mode of monitor periphery container. default is unless-stopped")
                        //         )
                        // )
                ),
        )
}

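// Example invocations (assuming the built binary is installed as `monitor`):
//
//     monitor core gen-config --host https://monitor.mogh.tech
//     monitor core start-mongo --username admin --password changeme
//     monitor core start --yes
//     monitor periphery gen-config --port 8000
//     monitor periphery start systemd --install
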
fn main() {
    let matches = cli().get_matches();

    match matches.subcommand() {
        Some(("core", sub_matches)) => {
            let core_command = sub_matches.subcommand().expect("\n❌ invalid call, should be 'monitor core <gen-config, start-mongo, start> <flags>' ❌\n");
            match core_command {
                ("gen-config", sub_matches) => gen_core_config(sub_matches),
                ("start-mongo", sub_matches) => start_mongo(sub_matches),
                ("start", sub_matches) => start_core(sub_matches),
                _ => {
                    println!("\n❌ invalid call, should be 'monitor core <gen-config, start-mongo, start> <flags>' ❌\n")
                }
            }
        }
        Some(("periphery", sub_matches)) => {
            let periphery_command = sub_matches.subcommand().expect(
                "\n❌ invalid call, should be 'monitor periphery <gen-config, start> <flags>' ❌\n",
            );
            match periphery_command {
                ("gen-config", sub_matches) => gen_periphery_config(sub_matches),
                ("start", sub_matches) => {
                    let periphery_start_command = sub_matches.subcommand().expect("\n❌ invalid call, should be 'monitor periphery start <systemd, daemon> <flags>' ❌\n");
                    match periphery_start_command {
                        ("systemd", sub_matches) => start_periphery_systemd(sub_matches),
                        ("daemon", sub_matches) => start_periphery_daemon(sub_matches),
                        // ("container", sub_matches) => start_periphery_container(sub_matches),
                        _ => println!("\n❌ invalid call, should be 'monitor periphery start <systemd, daemon> <flags>' ❌\n")
                    }
                }
                _ => {
                    println!("\n❌ invalid call, should be 'monitor periphery <gen-config, start>...' ❌\n")
                }
            }
        }
        _ => println!("\n❌ invalid call, should be 'monitor <core, periphery> ...' ❌\n"),
    }
}

@@ -1,73 +0,0 @@
# optional. this will be the document title on the web page (shows up as text in the browser tab). default is 'monitor'
title = "monitor"

# this should be the url used to access monitor in browser, potentially behind DNS, eg https://monitor.mogh.tech or http://12.34.56.78:9000
host = "https://monitor.mogh.tech"

# the port the core system will run on. if running core in a docker container, leave this port as 9000 and use a port bind, eg. -p 9001:9000
port = 9000

# daily utc offset in hours to send the daily update. eg 8:00 eastern time is 13:00 UTC, so the offset should be 13. the default of 0 runs at UTC midnight.
daily_offset_hours = 13

# secret used to generate the jwt. should be some randomly generated hash.
jwt_secret = "your_jwt_secret"

# can be 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk, 30-day
jwt_valid_for = "1-wk"

# webhook url given by the slack app that monitor will send alerts and a daily update to
slack_url = "your_slack_app_webhook_url"

# token that has to be given to github during repo webhook config as the secret
github_webhook_secret = "your_random_webhook_secret"

# optional. an alternate base url that is used to receive github webhook requests. if not provided, will use the 'host' address as base
github_webhook_base_url = "https://monitor-github-webhook.mogh.tech"

# token used to authenticate core requests to periphery
passkey = "your_random_passkey"

# controls the granularity of the system stats collection by monitor core
# can be 15-sec, 30-sec, 1-min, 2-min, 5-min
monitoring_interval = "1-min"

# number of days to keep stats around, or 0 to disable pruning. stats older than this number of days are deleted daily
keep_stats_for_days = 14

# these will be used by the GUI to attach to builds. new builds' docker orgs will default to the first org (or none if empty).
# when attached to a build, the image will be pushed to the repo under the specified organization
docker_organizations = ["your_docker_org1", "your_docker_org_2"]

# allow or deny user login with username / password
local_auth = true

[github_oauth]
enabled = true
id = "your_github_client_id"
secret = "your_github_client_secret"

[google_oauth]
enabled = true
id = "your_google_client_id"
secret = "your_google_client_secret"

[mongo]
uri = "your_mongo_uri"
app_name = "monitor_core"
db_name = "monitor" # this is the name of the mongo database that monitor will create its collections in.

[aws]
access_key_id = "your_aws_key_id"
secret_access_key = "your_aws_secret_key"
default_region = "us-east-1"
default_ami_name = "your_ami_name" # must be defined below in [aws.available_ami_accounts]
default_instance_type = "m5.2xlarge"
default_volume_gb = 8
default_subnet_id = "your_default_subnet_id"
default_security_group_ids = ["sg_id_1", "sg_id_2"]
default_key_pair_name = "your_default_key_pair_name"
default_assign_public_ip = false

[aws.available_ami_accounts]
your_ami_name = { ami_id = "ami-1234567890", github = ["github_username"], docker = ["docker_username"] }
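# each key under [aws.available_ami_accounts] names an ami that a build can
# select via its ami_name (or default_ami_name above); the github / docker
# lists are presumably the accounts already configured on that image
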
@@ -1 +0,0 @@
CONFIG_PATH=../config/core.config.example.toml # optional, default is /config/config.toml. this is usually bind mounted into the container

@@ -1,16 +0,0 @@
port = 8000 # optional. 8000 is default
repo_dir = "/repos" # optional. /repos is default. no reason to change if running the docker container, just mount your desired repo dir to /repos in the container
stats_polling_rate = "5-sec" # optional. 5-sec is default. can use 1-sec, 5-sec, 10-sec, 30-sec, 1-min. controls granularity of system stats recorded
allowed_ips = ["127.0.0.1"] # optional. default is empty, which will not block any request by ip.
passkeys = ["abcdefghijk"] # optional. default is empty, which will not require any passkey to be passed by core.

[secrets] # optional. can inject these values into your deployments configuration.
secret_variable = "secret_value"

[github_accounts] # optional
github_username1 = "github_token1"
github_username2 = "github_token2"

[docker_accounts] # optional
docker_username1 = "docker_token1"
docker_username2 = "docker_token2"

@@ -1 +0,0 @@
CONFIG_PATH=../config/periphery.config.example.toml # optional, default is /config/config.toml. this is usually bind mounted into the container

@@ -1,39 +0,0 @@
[package]
name = "core"
version = "0.3.4"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
helpers = { package = "monitor_helpers", path = "../lib/helpers" }
types = { package = "monitor_types", path = "../lib/types" }
db = { package = "db_client", path = "../lib/db_client" }
periphery = { package = "periphery_client", path = "../lib/periphery_client" }
axum_oauth2 = { path = "../lib/axum_oauth2" }
tokio = { version = "1.28", features = ["full"] }
tokio-tungstenite = { version = "0.19", features = ["native-tls"] }
tokio-util = { version = "0.7" }
axum = { version = "0.6", features = ["ws", "json"] }
tower = { version = "0.4", features = ["timeout"] }
tower-http = { version = "0.4", features = ["fs", "cors"] }
slack = { package = "slack_client_rs", version = "0.0.8" }
futures-util = "0.3"
mungos = "0.3.19"
serde = "1.0"
serde_json = "1.0"
dotenv = "0.15"
envy = "0.4"
anyhow = "1.0"
bcrypt = "0.14"
jwt = "0.16"
hmac = "0.12"
sha2 = "0.10"
async_timing_util = "0.1.14"
diff-struct = "0.5"
typeshare = "1.0.1"
hex = "0.4"
aws-config = "0.55.2"
aws-sdk-ec2 = "0.27.0"
merge_config_files = "0.1.3"
termination_signal = "0.1.2"

@@ -1,23 +0,0 @@
FROM rust:latest as builder
WORKDIR /builder

COPY ./core ./core

COPY ./lib/types ./lib/types
COPY ./lib/helpers ./lib/helpers

COPY ./lib/db_client ./lib/db_client
COPY ./lib/periphery_client ./lib/periphery_client
COPY ./lib/axum_oauth2 ./lib/axum_oauth2

RUN cd core && cargo build --release

FROM gcr.io/distroless/cc

COPY ./frontend/build /frontend

COPY --from=builder /builder/core/target/release/core /

EXPOSE 9000

CMD ["./core"]
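# note: the runtime stage is distroless (no shell or package manager); only
# the release binary and the prebuilt frontend assets are copied in, and
# gcr.io/distroless/cc supplies just the C runtime the Rust binary links
# against
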
@@ -1,685 +0,0 @@
use std::time::Duration;

use anyhow::{anyhow, Context};
use aws_sdk_ec2::Client;
use diff::Diff;
use futures_util::future::join_all;
use helpers::{all_logs_success, to_monitor_name};
use mungos::mongodb::bson::{doc, to_bson};
use types::{
    monitor_timestamp, traits::Permissioned, AwsBuilderBuildConfig, Build, DockerContainerState,
    Log, Operation, PermissionLevel, Update, UpdateStatus, UpdateTarget, Version,
};

use crate::{
    auth::RequestUser,
    cloud::aws::{
        create_ec2_client, create_instance_with_ami, terminate_ec2_instance, Ec2Instance,
    },
    helpers::empty_or_only_spaces,
    state::State,
};

const BUILDER_POLL_RATE_SECS: u64 = 2;
const BUILDER_POLL_MAX_TRIES: usize = 30;

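// with a 2 second poll rate and 30 max tries, core waits up to about a minute
// for the periphery agent on a freshly launched builder instance to respond
// before terminating the instance and giving up
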
impl State {
    pub async fn get_build_check_permissions(
        &self,
        build_id: &str,
        user: &RequestUser,
        permission_level: PermissionLevel,
    ) -> anyhow::Result<Build> {
        let build = self.db.get_build(build_id).await?;
        let permissions = build.get_user_permissions(&user.id);
        if user.is_admin || permissions >= permission_level {
            Ok(build)
        } else {
            Err(anyhow!(
                "user does not have required permissions on this build"
            ))
        }
    }

    pub async fn create_build(&self, name: &str, user: &RequestUser) -> anyhow::Result<Build> {
        if !user.is_admin && !user.create_build_permissions {
            return Err(anyhow!("user does not have permission to create builds"));
        }
        let start_ts = monitor_timestamp();
        let build = Build {
            name: to_monitor_name(name),
            docker_organization: self
                .config
                .docker_organizations
                .get(0)
                .map(|d| d.to_string()),
            aws_config: Some(AwsBuilderBuildConfig::default()),
            permissions: [(user.id.clone(), PermissionLevel::Update)]
                .into_iter()
                .collect(),
            last_built_at: "never".to_string(),
            created_at: start_ts.clone(),
            updated_at: start_ts.clone(),
            ..Default::default()
        };
        let build_id = self
            .db
            .builds
            .create_one(build)
            .await
            .context("failed at adding build to db")?;
        let build = self.db.get_build(&build_id).await?;
        let update = Update {
            target: UpdateTarget::Build(build_id),
            operation: Operation::CreateBuild,
            start_ts,
            end_ts: Some(monitor_timestamp()),
            operator: user.id.clone(),
            success: true,
            ..Default::default()
        };
        self.add_update(update).await?;
        Ok(build)
    }

    pub async fn create_full_build(
        &self,
        mut build: Build,
        user: &RequestUser,
    ) -> anyhow::Result<Build> {
        build.id = self.create_build(&build.name, user).await?.id;
        let build = self.update_build(build, user).await?;
        Ok(build)
    }

    pub async fn copy_build(
        &self,
        target_id: &str,
        new_name: String,
        user: &RequestUser,
    ) -> anyhow::Result<Build> {
        let mut build = self
            .get_build_check_permissions(target_id, user, PermissionLevel::Update)
            .await?;
        build.name = new_name;
        build.version = Version::default();
        let build = self.create_full_build(build, user).await?;
        Ok(build)
    }

    pub async fn delete_build(&self, build_id: &str, user: &RequestUser) -> anyhow::Result<Build> {
        if self.build_action_states.busy(build_id).await {
            return Err(anyhow!("build busy"));
        }
        let build = self
            .get_build_check_permissions(build_id, user, PermissionLevel::Update)
            .await?;
        let start_ts = monitor_timestamp();
        self.db.builds.delete_one(build_id).await?;
        let update = Update {
            target: UpdateTarget::Build(build_id.to_string()),
            operation: Operation::DeleteBuild,
            start_ts,
            end_ts: Some(monitor_timestamp()),
            operator: user.id.clone(),
            logs: vec![Log::simple(
                "delete build",
                format!("deleted build {}", build.name),
            )],
            success: true,
            ..Default::default()
        };
        self.add_update(update).await?;
        Ok(build)
    }

    pub async fn update_build(
        &self,
        new_build: Build,
        user: &RequestUser,
    ) -> anyhow::Result<Build> {
        if self.build_action_states.busy(&new_build.id).await {
            return Err(anyhow!("build busy"));
        }
        let id = new_build.id.clone();
        self.build_action_states
            .update_entry(id.clone(), |entry| {
                entry.updating = true;
            })
            .await;

        let res = self.update_build_inner(new_build, user).await;

        self.build_action_states
            .update_entry(id.clone(), |entry| {
                entry.updating = false;
            })
            .await;
        res
    }

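    // update_build above is just the action-state wrapper: it marks the build
    // as `updating` for the duration of the inner call so the busy() checks
    // reject concurrent mutations. update_build_inner does the real work.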
    async fn update_build_inner(
        &self,
        mut new_build: Build,
        user: &RequestUser,
    ) -> anyhow::Result<Build> {
        let start_ts = monitor_timestamp();
        let current_build = self
            .get_build_check_permissions(&new_build.id, user, PermissionLevel::Update)
            .await?;

        if let Some(new_server_id) = &new_build.server_id {
            if current_build.server_id.is_none()
                || new_server_id != current_build.server_id.as_ref().unwrap()
            {
                self.get_server_check_permissions(new_server_id, user, PermissionLevel::Update)
                    .await
                    .context("user does not have permission to attach build to this server")?;
            }
        }

        // none of these should be changed through this method
        new_build.name = current_build.name.clone();
        new_build.permissions = current_build.permissions.clone();
        new_build.last_built_at = current_build.last_built_at.clone();
        new_build.created_at = current_build.created_at.clone();
        new_build.updated_at = start_ts.clone();

        // filter out any build args that contain empty strings
        // these could only happen by accident
        new_build.docker_build_args = new_build.docker_build_args.map(|mut args| {
            args.build_args = args
                .build_args
                .into_iter()
                .filter(|a| !empty_or_only_spaces(&a.variable) && !empty_or_only_spaces(&a.value))
                .collect();
            args
        });

        self.db
            .builds
            .update_one(&new_build.id, mungos::Update::Regular(new_build.clone()))
            .await
            .context("failed at update one build")?;

        let diff = current_build.diff(&new_build);

        let update = Update {
            operation: Operation::UpdateBuild,
            target: UpdateTarget::Build(new_build.id.clone()),
            start_ts,
            status: UpdateStatus::Complete,
            logs: vec![Log::simple(
                "build update",
                serde_json::to_string_pretty(&diff).unwrap(),
            )],
            operator: user.id.clone(),
            end_ts: Some(monitor_timestamp()),
            success: true,
            ..Default::default()
        };

        // update.id = self.add_update(update.clone()).await?;

        // if any_option_diff_is_some(&[&diff.repo, &diff.branch, &diff.github_account])
        //     || option_diff_is_some(&diff.on_clone)
        // {
        //     let server = self.db.get_server(&current_build.server_id).await?;
        //     match self.periphery.clone_repo(&server, &new_build).await {
        //         Ok(clone_logs) => {
        //             update.logs.extend(clone_logs);
        //         }
        //         Err(e) => update
        //             .logs
        //             .push(Log::error("cloning repo", format!("{e:#?}"))),
        //     }
        // }

        // update.end_ts = Some(monitor_timestamp());
        // update.success = all_logs_success(&update.logs);
        // update.status = UpdateStatus::Complete;

        self.add_update(update).await?;

        Ok(new_build)
    }

    pub async fn build(&self, build_id: &str, user: &RequestUser) -> anyhow::Result<Update> {
        if self.build_action_states.busy(build_id).await {
            return Err(anyhow!("build busy"));
        }
        self.build_action_states
            .update_entry(build_id.to_string(), |entry| {
                entry.building = true;
            })
            .await;
        let res = self.build_inner(build_id, user).await;
        self.build_action_states
            .update_entry(build_id.to_string(), |entry| {
                entry.building = false;
            })
            .await;
        res
    }

    async fn build_inner(&self, build_id: &str, user: &RequestUser) -> anyhow::Result<Update> {
        let mut build = self
            .get_build_check_permissions(build_id, user, PermissionLevel::Execute)
            .await?;
        build.version.increment();
        let mut update = Update {
            target: UpdateTarget::Build(build_id.to_string()),
            operation: Operation::BuildBuild,
            start_ts: monitor_timestamp(),
            status: UpdateStatus::InProgress,
            operator: user.id.clone(),
            success: true,
            version: build.version.clone().into(),
            ..Default::default()
        };
        update.id = self.add_update(update.clone()).await?;

        let (server, aws_client) = if let Some(server_id) = &build.server_id {
            let server = self.db.get_server(server_id).await;
            if let Err(e) = server {
                update.status = UpdateStatus::Complete;
                update.end_ts = Some(monitor_timestamp());
                update.success = false;
                update
                    .logs
                    .push(Log::error("get build server", format!("{e:#?}")));
                self.update_update(update.clone()).await?;
                return Err(e);
            }
            let server = Ec2Instance {
                instance_id: String::new(),
                server: server.unwrap(),
            };
            (server, None)
        } else if build.aws_config.is_some() {
            let start_ts = monitor_timestamp();
            let res = self.create_ec2_instance_for_build(&build).await;
            if let Err(e) = res {
                update.status = UpdateStatus::Complete;
                update.end_ts = Some(monitor_timestamp());
                update.success = false;
                update.logs.push(Log {
                    stage: "start build server".to_string(),
                    stderr: format!("{e:#?}"),
                    success: false,
                    start_ts,
                    end_ts: monitor_timestamp(),
                    ..Default::default()
                });
                self.update_update(update).await?;
                return Err(e);
            }
            let (server, aws_client, logs) = res.unwrap();
            update.logs.extend(logs);
            self.update_update(update.clone()).await?;
            (server, aws_client)
        } else {
            update.status = UpdateStatus::Complete;
            update.end_ts = Some(monitor_timestamp());
            update.success = false;
            update.logs.push(Log::error(
                "start build",
                "build has neither server_id nor aws_config attached".to_string(),
            ));
            self.update_update(update).await?;
            return Err(anyhow!(
                "build has neither server_id nor aws_config attached"
            ));
        };

        let clone_success = match self.periphery.clone_repo(&server.server, &build).await {
            Ok(clone_logs) => {
                update.logs.extend(clone_logs);
                all_logs_success(&update.logs)
            }
            Err(e) => {
                update
                    .logs
                    .push(Log::error("clone repo", format!("{e:#?}")));
                false
            }
        };

        if !clone_success {
            let _ = self
                .periphery
                .delete_repo(&server.server, &build.name)
                .await;
            if let Some(aws_client) = aws_client {
                self.terminate_ec2_instance(aws_client, &server, &mut update)
                    .await;
            }
            update.status = UpdateStatus::Complete;
            update.end_ts = Some(monitor_timestamp());
            update.success = false;
            self.update_update(update.clone()).await?;
            return Ok(update);
        }

        self.update_update(update.clone()).await?;

        let build_logs = match self
            .periphery
            .build(&server.server, &build)
            .await
            .context("failed at call to periphery to build")
        {
            Ok(logs) => logs,
            Err(e) => Some(vec![Log::error("build", format!("{e:#?}"))]),
        };

        match build_logs {
            Some(logs) => {
                let success = all_logs_success(&logs);
                update.logs.extend(logs);
                if success {
                    let _ = self
                        .db
                        .builds
                        .update_one::<Build>(
                            build_id,
                            mungos::Update::Set(doc! {
                                "version": to_bson(&build.version)
                                    .context("failed at converting version to bson")?,
                                "last_built_at": monitor_timestamp(),
                            }),
                        )
                        .await;
                }
            }
            None => {
                update
                    .logs
                    .push(Log::error("build", "builder busy".to_string()));
            }
        }

        let _ = self
            .periphery
            .delete_repo(&server.server, &build.name)
            .await;

        if let Some(aws_client) = aws_client {
            self.terminate_ec2_instance(aws_client, &server, &mut update)
                .await;
        }

        self.handle_post_build_redeploy(build_id, &mut update).await;

        update.success = all_logs_success(&update.logs);
        update.status = UpdateStatus::Complete;
        update.end_ts = Some(monitor_timestamp());

        self.update_update(update.clone()).await?;

        Ok(update)
    }

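    // after a successful build, any deployments that reference this build and
    // have `redeploy_on_build` set are looked up, and the ones currently in a
    // Running state are redeployed concurrently via join_all below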
    async fn handle_post_build_redeploy(&self, build_id: &str, update: &mut Update) {
        let redeploy_deployments = self
            .db
            .deployments
            .get_some(
                doc! { "build_id": build_id, "redeploy_on_build": true },
                None,
            )
            .await;

        if let Ok(deployments) = redeploy_deployments {
            let futures = deployments.into_iter().map(|d| async move {
                let request_user = RequestUser {
                    id: "auto redeploy".to_string(),
                    is_admin: true,
                    ..Default::default()
                };
                let state = self
                    .get_deployment_with_container_state(&request_user, &d.id)
                    .await
                    .map(|r| r.state)
                    .unwrap_or_default();
                if state == DockerContainerState::Running {
                    Some((
                        d.id.clone(),
                        self.deploy_container(
                            &d.id,
                            &RequestUser {
                                id: "auto redeploy".to_string(),
                                is_admin: true,
                                ..Default::default()
                            },
                            None,
                            None,
                        )
                        .await,
                    ))
                } else {
                    None
                }
            });

            let redeploy_results = join_all(futures).await;

            let mut redeploys = Vec::<String>::new();
            let mut redeploy_failures = Vec::<String>::new();

            for res in redeploy_results {
                if res.is_none() {
                    continue;
                }
                let (id, res) = res.unwrap();
                match res {
                    Ok(_) => redeploys.push(id),
                    Err(e) => redeploy_failures.push(format!("{id}: {e:#?}")),
                }
            }

            if !redeploys.is_empty() {
                update.logs.push(Log::simple(
                    "redeploy",
                    format!("redeployed deployments: {}", redeploys.join(", ")),
                ))
            }

            if !redeploy_failures.is_empty() {
                update.logs.push(Log::simple(
                    "redeploy failures",
                    redeploy_failures.join("\n"),
                ))
            }
        } else if let Err(e) = redeploy_deployments {
            update.logs.push(Log::simple(
                "redeploys failed",
                format!("failed to get deployments to redeploy: {e:#?}"),
            ))
        }
    }

    async fn create_ec2_instance_for_build(
        &self,
        build: &Build,
    ) -> anyhow::Result<(Ec2Instance, Option<Client>, Vec<Log>)> {
        if build.aws_config.is_none() {
            return Err(anyhow!("build has no aws_config attached"));
        }
        let start_instance_ts = monitor_timestamp();
        let aws_config = build.aws_config.as_ref().unwrap();
        let region = aws_config
            .region
            .as_ref()
            .unwrap_or(&self.config.aws.default_region)
            .to_string();
        let aws_client = create_ec2_client(
            region,
            &self.config.aws.access_key_id,
            self.config.aws.secret_access_key.clone(),
        )
        .await;
        let ami_name = aws_config
            .ami_name
            .as_ref()
            .unwrap_or(&self.config.aws.default_ami_name);
        let ami_id = &self
            .config
            .aws
            .available_ami_accounts
            .get(ami_name)
            .ok_or(anyhow!("no ami id associated with ami name {ami_name}"))?
            .ami_id;
        let instance_type = aws_config
            .instance_type
            .as_ref()
            .unwrap_or(&self.config.aws.default_instance_type);
        let subnet_id = aws_config
            .subnet_id
            .as_ref()
            .unwrap_or(&self.config.aws.default_subnet_id);
        let security_group_ids = aws_config
            .security_group_ids
            .as_ref()
            .unwrap_or(&self.config.aws.default_security_group_ids)
            .to_owned();
        let readable_sec_group_ids = security_group_ids.join(", ");
        let volume_size_gb = *aws_config
            .volume_gb
            .as_ref()
            .unwrap_or(&self.config.aws.default_volume_gb);
        let key_pair_name = aws_config
            .key_pair_name
            .as_ref()
            .unwrap_or(&self.config.aws.default_key_pair_name);
        let assign_public_ip = *aws_config
            .assign_public_ip
            .as_ref()
            .unwrap_or(&self.config.aws.default_assign_public_ip);
        let instance = create_instance_with_ami(
            &aws_client,
            &format!("BUILDER-{}-v{}", build.name, build.version),
            ami_id,
            instance_type,
            subnet_id,
            security_group_ids,
            volume_size_gb,
            key_pair_name,
            assign_public_ip,
        )
        .await?;
        let instance_id = &instance.instance_id;
        let start_log = Log {
            stage: "start build instance".to_string(),
            success: true,
            stdout: format!("instance id: {instance_id}\nami id: {ami_id}\ninstance type: {instance_type}\nvolume size: {volume_size_gb} GB\nsubnet id: {subnet_id}\nsecurity groups: {readable_sec_group_ids}"),
            start_ts: start_instance_ts,
            end_ts: monitor_timestamp(),
            ..Default::default()
        };
        let start_connect_ts = monitor_timestamp();
        let mut res = Ok(String::new());
        for _ in 0..BUILDER_POLL_MAX_TRIES {
            let version = self.periphery.get_version(&instance.server).await;
            if let Ok(version) = version {
                let connect_log = Log {
                    stage: "build instance connected".to_string(),
                    success: true,
                    stdout: format!("established contact with periphery on builder\nperiphery version: v{version}"),
                    start_ts: start_connect_ts,
                    end_ts: monitor_timestamp(),
                    ..Default::default()
                };
                return Ok((instance, Some(aws_client), vec![start_log, connect_log]));
            }
            res = version;
            tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS)).await;
        }
        let _ = terminate_ec2_instance(&aws_client, &instance.instance_id).await;
        Err(anyhow!(
            "unable to reach periphery agent on build server\n{res:#?}"
        ))
    }

    async fn terminate_ec2_instance(
        &self,
        aws_client: Client,
        server: &Ec2Instance,
        update: &mut Update,
    ) {
        let res = terminate_ec2_instance(&aws_client, &server.instance_id).await;
        if let Err(e) = res {
            update
                .logs
                .push(Log::error("terminate instance", format!("{e:#?}")))
        } else {
            update.logs.push(Log::simple(
                "terminate instance",
                format!("terminated instance id {}", server.instance_id),
            ))
        }
    }

    // pub async fn reclone_build(
    //     &self,
    //     build_id: &str,
    //     user: &RequestUser,
    // ) -> anyhow::Result<Update> {
    //     if self.build_busy(build_id).await {
    //         return Err(anyhow!("build busy"));
    //     }
    //     {
    //         let mut lock = self.build_action_states.lock().await;
    //         let entry = lock.entry(build_id.to_string()).or_default();
    //         entry.recloning = true;
    //     }
    //     let res = self.reclone_build_inner(build_id, user).await;
    //     {
    //         let mut lock = self.build_action_states.lock().await;
    //         let entry = lock.entry(build_id.to_string()).or_default();
    //         entry.recloning = false;
    //     }
    //     res
    // }

    // async fn reclone_build_inner(
    //     &self,
    //     build_id: &str,
    //     user: &RequestUser,
    // ) -> anyhow::Result<Update> {
    //     let build = self
    //         .get_build_check_permissions(build_id, user, PermissionLevel::Update)
    //         .await?;
    //     let server = self.db.get_server(&build.server_id).await?;
    //     let mut update = Update {
    //         target: UpdateTarget::Build(build_id.to_string()),
    //         operation: Operation::RecloneBuild,
    //         start_ts: monitor_timestamp(),
    //         status: UpdateStatus::InProgress,
    //         operator: user.id.clone(),
    //         success: true,
    //         ..Default::default()
    //     };
    //     update.id = self.add_update(update.clone()).await?;

    //     update.success = match self.periphery.clone_repo(&server, &build).await {
    //         Ok(clone_logs) => {
    //             update.logs.extend(clone_logs);
    //             true
    //         }
    //         Err(e) => {
    //             update
    //                 .logs
    //                 .push(Log::error("clone repo", format!("{e:#?}")));
    //             false
    //         }
    //     };

    //     update.status = UpdateStatus::Complete;
    //     update.end_ts = Some(monitor_timestamp());

    //     self.update_update(update.clone()).await?;

    //     Ok(update)
    // }
}

@@ -1,238 +0,0 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::all_logs_success;
use types::{
    monitor_timestamp, traits::Permissioned, Log, Operation, PeripheryCommand,
    PeripheryCommandBuilder, PermissionLevel, Update, UpdateStatus, UpdateTarget,
};

use crate::{auth::RequestUser, state::State};

impl State {
    pub async fn get_command_check_permissions(
        &self,
        command_id: &str,
        user: &RequestUser,
        permission_level: PermissionLevel,
    ) -> anyhow::Result<PeripheryCommand> {
        let command = self.db.get_command(command_id).await?;
        let permissions = command.get_user_permissions(&user.id);
        if user.is_admin || permissions >= permission_level {
            Ok(command)
        } else {
            Err(anyhow!(
                "user does not have required permissions on this command"
            ))
        }
    }

    pub async fn create_command(
        &self,
        name: &str,
        server_id: String,
        user: &RequestUser,
    ) -> anyhow::Result<PeripheryCommand> {
        self.get_server_check_permissions(&server_id, user, PermissionLevel::Update)
            .await?;
        let start_ts = monitor_timestamp();
        let command = PeripheryCommandBuilder::default()
            .name(name.to_string())
            .server_id(server_id)
            .build()
            .context("failed to build command")?;
        let command_id = self
            .db
            .commands
            .create_one(command)
            .await
            .context("failed at adding command to db")?;
        let command = self.db.get_command(&command_id).await?;
        let update = Update {
            target: UpdateTarget::Command(command_id),
            operation: Operation::CreateCommand,
            start_ts,
            end_ts: Some(monitor_timestamp()),
            operator: user.id.clone(),
            success: true,
            ..Default::default()
        };
        self.add_update(update).await?;
        Ok(command)
    }

    pub async fn create_full_command(
        &self,
        mut command: PeripheryCommand,
        user: &RequestUser,
    ) -> anyhow::Result<PeripheryCommand> {
        command.id = self
            .create_command(&command.name, command.server_id.clone(), user)
            .await?
            .id;
        let command = self.update_command(command, user).await?;
        Ok(command)
    }

    pub async fn copy_command(
        &self,
        target_id: &str,
        new_name: String,
        new_server_id: String,
        user: &RequestUser,
    ) -> anyhow::Result<PeripheryCommand> {
        let mut command = self
            .get_command_check_permissions(target_id, user, PermissionLevel::Update)
            .await?;
        command.name = new_name;
        command.server_id = new_server_id;
        let command = self.create_full_command(command, user).await?;
        Ok(command)
    }

    pub async fn delete_command(
        &self,
        command_id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<PeripheryCommand> {
        if self.command_action_states.busy(command_id).await {
            return Err(anyhow!("command busy"));
        }
        let command = self
            .get_command_check_permissions(command_id, user, PermissionLevel::Update)
            .await?;
        let start_ts = monitor_timestamp();
        self.db.commands.delete_one(command_id).await?;
        let update = Update {
            target: UpdateTarget::Command(command_id.to_string()),
            operation: Operation::DeleteCommand,
            start_ts,
            end_ts: Some(monitor_timestamp()),
            operator: user.id.clone(),
            logs: vec![Log::simple(
                "delete command",
                format!("deleted command {}", command.name),
            )],
            success: true,
            ..Default::default()
        };
        self.add_update(update).await?;
        Ok(command)
    }

    pub async fn update_command(
        &self,
        mut new_command: PeripheryCommand,
        user: &RequestUser,
    ) -> anyhow::Result<PeripheryCommand> {
        let current_command = self
            .get_command_check_permissions(&new_command.id, user, PermissionLevel::Update)
            .await?;
        let start_ts = monitor_timestamp();

        // none of these should be changed through this method
        new_command.permissions = current_command.permissions.clone();
        new_command.server_id = current_command.server_id.clone();
        new_command.created_at = current_command.created_at.clone();
        new_command.updated_at = start_ts.clone();

        self.db
            .commands
            .update_one(
                &new_command.id,
                mungos::Update::Regular(new_command.clone()),
            )
            .await
            .context("failed at update one command")?;

        let diff = current_command.diff(&new_command);

        let update = Update {
            operation: Operation::UpdateCommand,
            target: UpdateTarget::Command(new_command.id.clone()),
            start_ts,
            status: UpdateStatus::Complete,
            logs: vec![Log::simple(
                "command update",
                serde_json::to_string_pretty(&diff).unwrap(),
            )],
            operator: user.id.clone(),
            success: true,
            ..Default::default()
        };

        self.add_update(update.clone()).await?;

        self.update_update(update).await?;

        Ok(new_command)
    }

    pub async fn run_command(
        &self,
        command_id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<Update> {
        if self.command_action_states.busy(command_id).await {
            return Err(anyhow!("command busy"));
        }
        self.command_action_states
            .update_entry(command_id.to_string(), |entry| {
                entry.running = true;
            })
            .await;
        let res = self.run_command_inner(command_id, user).await;
        self.command_action_states
            .update_entry(command_id.to_string(), |entry| {
                entry.running = false;
            })
            .await;
        res
    }

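    // same busy-guard pattern as the build actions above: `running` is set in
    // the command's action state around the inner call so concurrent runs are
    // rejected by the busy() check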
    async fn run_command_inner(
        &self,
        command_id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<Update> {
        let start_ts = monitor_timestamp();
        let command = self
            .get_command_check_permissions(command_id, user, PermissionLevel::Execute)
            .await?;

        if command.command.path.is_empty() || command.command.command.is_empty() {
            return Err(anyhow!("command or path is empty, aborting"));
        }

        let server = self.db.get_server(&command.server_id).await?;

        let mut update = Update {
            target: UpdateTarget::Command(command_id.to_string()),
            operation: Operation::RunCommand,
            start_ts,
            status: UpdateStatus::InProgress,
            success: true,
            operator: user.id.clone(),
            ..Default::default()
        };
        update.id = self.add_update(update.clone()).await?;

        match self.periphery.run_command(&server, &command.command).await {
            Ok(log) => {
                update.logs.push(log);
            }
            Err(e) => {
                update
                    .logs
                    .push(Log::error("run command", format!("{e:#?}")));
            }
        }

        update.success = all_logs_success(&update.logs);
        update.status = UpdateStatus::Complete;
        update.end_ts = Some(monitor_timestamp());

        self.update_update(update.clone()).await?;

        Ok(update)
    }
}

@@ -1,906 +0,0 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::{all_logs_success, to_monitor_name};
use mungos::mongodb::bson::doc;
use types::{
  monitor_timestamp, traits::Permissioned, Deployment, DeploymentBuilder,
  DeploymentWithContainerState, DockerContainerState, Log, Operation, PermissionLevel,
  ServerStatus, ServerWithStatus, TerminationSignal, Update, UpdateStatus, UpdateTarget,
};

use crate::{
  auth::RequestUser,
  helpers::{any_option_diff_is_some, empty_or_only_spaces, get_image_name, option_diff_is_some},
  state::State,
};

impl State {
  pub async fn get_deployment_check_permissions(
    &self,
    deployment_id: &str,
    user: &RequestUser,
    permission_level: PermissionLevel,
  ) -> anyhow::Result<Deployment> {
    let deployment = self.db.get_deployment(deployment_id).await?;
    let permissions = deployment.get_user_permissions(&user.id);
    if user.is_admin || permissions >= permission_level {
      Ok(deployment)
    } else {
      Err(anyhow!(
        "user does not have required permissions on this deployment"
      ))
    }
  }

  pub async fn create_deployment(
    &self,
    name: &str,
    server_id: String,
    user: &RequestUser,
  ) -> anyhow::Result<Deployment> {
    self.get_server_check_permissions(&server_id, user, PermissionLevel::Update)
      .await?;
    let start_ts = monitor_timestamp();

    let mut deployment = DeploymentBuilder::default()
      .name(to_monitor_name(name))
      .server_id(server_id)
      .build()
      .context("failed to build deployment")?;
    deployment.permissions = [(user.id.clone(), PermissionLevel::Update)]
      .into_iter()
      .collect();
    deployment.created_at = start_ts.clone();
    deployment.updated_at = start_ts.clone();

    let deployment_id = self
      .db
      .deployments
      .create_one(deployment)
      .await
      .context("failed to add deployment to db")?;
    let deployment = self.db.get_deployment(&deployment_id).await?;
    let update = Update {
      target: UpdateTarget::Deployment(deployment_id),
      operation: Operation::CreateDeployment,
      start_ts,
      end_ts: Some(monitor_timestamp()),
      operator: user.id.clone(),
      success: true,
      ..Default::default()
    };
    self.add_update(update).await?;
    Ok(deployment)
  }

  pub async fn create_full_deployment(
    &self,
    mut deployment: Deployment,
    user: &RequestUser,
  ) -> anyhow::Result<Deployment> {
    deployment.id = self
      .create_deployment(&deployment.name, deployment.server_id.clone(), user)
      .await?
      .id;
    let deployment = self.update_deployment(deployment, user).await?;
    Ok(deployment)
  }

  pub async fn copy_deployment(
    &self,
    target_id: &str,
    new_name: String,
    new_server_id: String,
    user: &RequestUser,
  ) -> anyhow::Result<Deployment> {
    let mut deployment = self
      .get_deployment_check_permissions(target_id, user, PermissionLevel::Update)
      .await?;
    deployment.name = new_name;
    deployment.server_id = new_server_id;
    let deployment = self.create_full_deployment(deployment, user).await?;
    Ok(deployment)
  }

  pub async fn delete_deployment(
    &self,
    deployment_id: &str,
    user: &RequestUser,
    stop_signal: Option<TerminationSignal>,
    stop_time: Option<i32>,
  ) -> anyhow::Result<Deployment> {
    if self.deployment_action_states.busy(deployment_id).await {
      return Err(anyhow!("deployment busy"));
    }
    let deployment = self
      .get_deployment_check_permissions(deployment_id, user, PermissionLevel::Update)
      .await?;
    let start_ts = monitor_timestamp();
    let server = self.db.get_server(&deployment.server_id).await?;
    let log = match self
      .periphery
      .container_remove(&server, &deployment.name, stop_signal, stop_time)
      .await
    {
      Ok(log) => log,
      Err(e) => Log::error("destroy container", format!("{e:#?}")),
    };
    self.db
      .deployments
      .delete_one(deployment_id)
      .await
      .context(format!(
        "failed to delete deployment {deployment_id} from mongo"
      ))?;
    let update = Update {
      target: UpdateTarget::Deployment(deployment_id.to_string()),
      operation: Operation::DeleteDeployment,
      start_ts,
      end_ts: Some(monitor_timestamp()),
      operator: user.id.clone(),
      logs: vec![
        log,
        Log::simple(
          "delete deployment",
          format!(
            "deleted deployment {} on server {}",
            deployment.name, server.name
          ),
        ),
      ],
      success: true,
      ..Default::default()
    };
    self.add_update(update).await?;
    Ok(deployment)
  }

  pub async fn update_deployment(
    &self,
    new_deployment: Deployment,
    user: &RequestUser,
  ) -> anyhow::Result<Deployment> {
    if self.deployment_action_states.busy(&new_deployment.id).await {
      return Err(anyhow!("deployment busy"));
    }
    let id = new_deployment.id.clone();

    self.deployment_action_states
      .update_entry(id.clone(), |entry| {
        entry.updating = true;
      })
      .await;

    let res = self.update_deployment_inner(new_deployment, user).await;

    self.deployment_action_states
      .update_entry(id.clone(), |entry| {
        entry.updating = false;
      })
      .await;

    res
  }

  async fn update_deployment_inner(
    &self,
    mut new_deployment: Deployment,
    user: &RequestUser,
  ) -> anyhow::Result<Deployment> {
    let current_deployment = self
      .get_deployment_check_permissions(&new_deployment.id, user, PermissionLevel::Update)
      .await?;
    let start_ts = monitor_timestamp();

    // none of these should be changed through this method
    new_deployment.name = current_deployment.name.clone();
    new_deployment.permissions = current_deployment.permissions.clone();
    new_deployment.server_id = current_deployment.server_id.clone();
    new_deployment.created_at = current_deployment.created_at.clone();
    new_deployment.updated_at = start_ts.clone();

    // filter out any volumes, ports, env vars, or extra args which are or contain empty strings.
    // these could only happen by accident
    new_deployment.docker_run_args.volumes = new_deployment
      .docker_run_args
      .volumes
      .into_iter()
      .filter(|v| !empty_or_only_spaces(&v.local) && !empty_or_only_spaces(&v.container))
      .collect();
    new_deployment.docker_run_args.ports = new_deployment
      .docker_run_args
      .ports
      .into_iter()
      .filter(|p| !empty_or_only_spaces(&p.local) && !empty_or_only_spaces(&p.container))
      .collect();
    new_deployment.docker_run_args.environment = new_deployment
      .docker_run_args
      .environment
      .into_iter()
      .filter(|e| !empty_or_only_spaces(&e.variable) && !empty_or_only_spaces(&e.value))
      .collect();
    new_deployment.docker_run_args.extra_args = new_deployment
      .docker_run_args
      .extra_args
      .into_iter()
      .filter(|a| !a.is_empty())
      .collect();

    self.db
      .deployments
      .update_one(
        &new_deployment.id,
        mungos::Update::Regular(new_deployment.clone()),
      )
      .await
      .context("failed to update deployment")?;

    let diff = current_deployment.diff(&new_deployment);

    let mut update = Update {
      operation: Operation::UpdateDeployment,
      target: UpdateTarget::Deployment(new_deployment.id.clone()),
      start_ts,
      status: UpdateStatus::InProgress,
      logs: vec![Log::simple(
        "deployment update",
        serde_json::to_string_pretty(&diff).unwrap(),
      )],
      operator: user.id.clone(),
      success: true,
      ..Default::default()
    };

    update.id = self.add_update(update.clone()).await?;

    if any_option_diff_is_some(&[&diff.repo, &diff.branch, &diff.github_account])
      || option_diff_is_some(&diff.on_clone)
    {
      let server = self.db.get_server(&current_deployment.server_id).await?;
      match self.periphery.clone_repo(&server, &new_deployment).await {
        Ok(clone_logs) => {
          update.logs.extend(clone_logs);
        }
        Err(e) => update
          .logs
          .push(Log::error("cloning repo", format!("{e:#?}"))),
      }
    }

    update.end_ts = Some(monitor_timestamp());
    update.success = all_logs_success(&update.logs);
    update.status = UpdateStatus::Complete;

    self.update_update(update).await?;

    Ok(new_deployment)
  }

  pub async fn rename_deployment(
    &self,
    deployment_id: &str,
    new_name: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Update> {
    if self.deployment_action_states.busy(deployment_id).await {
      return Err(anyhow!("deployment busy"));
    }

    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.renaming = true;
      })
      .await;

    let res = self
      .rename_deployment_inner(deployment_id, new_name, user)
      .await;

    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.renaming = false;
      })
      .await;
    res
  }

  async fn rename_deployment_inner(
    &self,
    deployment_id: &str,
    new_name: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Update> {
    let start_ts = monitor_timestamp();
    let deployment = self
      .get_deployment_check_permissions(deployment_id, user, PermissionLevel::Update)
      .await?;
    let mut update = Update {
      target: UpdateTarget::Deployment(deployment_id.to_string()),
      operation: Operation::RenameDeployment,
      start_ts,
      status: UpdateStatus::InProgress,
      operator: user.id.to_string(),
      success: true,
      ..Default::default()
    };
    update.id = self.add_update(update.clone()).await?;
    let server_with_status = self.get_server(&deployment.server_id, user).await;
    if server_with_status.is_err() {
      update.logs.push(Log::error(
        "get server",
        format!(
          "failed to get server info: {:?}",
          server_with_status.as_ref().err().unwrap()
        ),
      ));
      update.status = UpdateStatus::Complete;
      update.end_ts = monitor_timestamp().into();
      update.success = false;
      self.update_update(update).await?;
      return Err(server_with_status.err().unwrap());
    }
    let ServerWithStatus { server, status } = server_with_status.unwrap();
    if status != ServerStatus::Ok {
      update.logs.push(Log::error(
        "check server status",
        String::from("cannot rename deployment when periphery is disabled or unreachable"),
      ));
      update.status = UpdateStatus::Complete;
      update.end_ts = monitor_timestamp().into();
      update.success = false;
      self.update_update(update).await?;
      return Err(anyhow!(
        "cannot rename deployment when periphery is disabled or unreachable"
      ));
    }
    let deployment_state = self
      .get_deployment_with_container_state(user, deployment_id)
      .await;
    if deployment_state.is_err() {
      update.logs.push(Log::error(
        "check deployment status",
        format!(
          "could not get current state of deployment: {:?}",
          deployment_state.as_ref().err().unwrap()
        ),
      ));
      update.status = UpdateStatus::Complete;
      update.end_ts = monitor_timestamp().into();
      update.success = false;
      self.update_update(update).await?;
      return Err(deployment_state.err().unwrap());
    }
    let DeploymentWithContainerState {
      deployment, state, ..
    } = deployment_state.unwrap();
    if state != DockerContainerState::NotDeployed {
      let log = self
        .periphery
        .container_rename(&server, &deployment.name, new_name)
        .await;
      if log.is_err() {
        update.logs.push(Log::error(
          "rename container",
          format!("{:?}", log.as_ref().err().unwrap()),
        ));
        update.status = UpdateStatus::Complete;
        update.end_ts = monitor_timestamp().into();
        update.success = false;
        self.update_update(update).await?;
        return Err(log.err().unwrap());
      }
      let log = log.unwrap();
      if !log.success {
        update.logs.push(log);
        update.status = UpdateStatus::Complete;
        update.end_ts = monitor_timestamp().into();
        update.success = false;
        self.update_update(update).await?;
        return Err(anyhow!("rename container on periphery not successful"));
      }
      update.logs.push(log);
    }
    let res = self
      .db
      .deployments
      .update_one(
        deployment_id,
        mungos::Update::<()>::Set(
          doc! { "name": to_monitor_name(new_name), "updated_at": monitor_timestamp() },
        ),
      )
      .await
      .context("failed to update deployment name on mongo");
    if let Err(e) = res {
      update
        .logs
        .push(Log::error("mongo update", format!("{e:?}")));
    } else {
      update.logs.push(Log::simple(
        "mongo update",
        String::from("updated name on mongo"),
      ));
    }

    if deployment.repo.is_some() {
      let res = self.reclone_deployment(deployment_id, user, false).await;
      if let Err(e) = res {
        update
          .logs
          .push(Log::error("reclone repo", format!("{e:?}")));
      } else {
        update.logs.push(Log::simple(
          "reclone repo",
          "deployment repo cloned with new name".to_string(),
        ));
      }
    }

    update.end_ts = monitor_timestamp().into();
    update.status = UpdateStatus::Complete;
    update.success = all_logs_success(&update.logs);

    self.update_update(update.clone()).await?;

    Ok(update)
  }

  pub async fn reclone_deployment(
    &self,
    deployment_id: &str,
    user: &RequestUser,
    check_deployment_busy: bool,
  ) -> anyhow::Result<Update> {
    if check_deployment_busy && self.deployment_action_states.busy(deployment_id).await {
      return Err(anyhow!("deployment busy"));
    }
    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.recloning = true;
      })
      .await;

    let res = self.reclone_deployment_inner(deployment_id, user).await;

    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.recloning = false;
      })
      .await;

    res
  }

  async fn reclone_deployment_inner(
    &self,
    deployment_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Update> {
    let deployment = self
      .get_deployment_check_permissions(deployment_id, user, PermissionLevel::Execute)
      .await?;
    let server = self.db.get_server(&deployment.server_id).await?;
    let mut update = Update {
      target: UpdateTarget::Deployment(deployment_id.to_string()),
      operation: Operation::RecloneDeployment,
      start_ts: monitor_timestamp(),
      status: UpdateStatus::InProgress,
      operator: user.id.clone(),
      success: true,
      ..Default::default()
    };
    update.id = self.add_update(update.clone()).await?;

    match self.periphery.clone_repo(&server, &deployment).await {
      Ok(clone_logs) => {
        update.logs.extend(clone_logs);
      }
      Err(e) => {
        update
          .logs
          .push(Log::error("clone repo", format!("{e:#?}")));
      }
    }

    update.success = all_logs_success(&update.logs);
    update.status = UpdateStatus::Complete;
    update.end_ts = Some(monitor_timestamp());

    self.update_update(update.clone()).await?;

    Ok(update)
  }

  pub async fn deploy_container(
    &self,
    deployment_id: &str,
    user: &RequestUser,
    stop_signal: Option<TerminationSignal>,
    stop_time: Option<i32>,
  ) -> anyhow::Result<Update> {
    if self.deployment_action_states.busy(deployment_id).await {
      return Err(anyhow!("deployment busy"));
    }
    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.deploying = true;
      })
      .await;

    let res = self
      .deploy_container_inner(deployment_id, user, stop_signal, stop_time)
      .await;

    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.deploying = false;
      })
      .await;
    res
  }

  async fn deploy_container_inner(
    &self,
    deployment_id: &str,
    user: &RequestUser,
    stop_signal: Option<TerminationSignal>,
    stop_time: Option<i32>,
  ) -> anyhow::Result<Update> {
    let start_ts = monitor_timestamp();
    let mut deployment = self
      .get_deployment_check_permissions(deployment_id, user, PermissionLevel::Execute)
      .await?;
    let version = if let Some(build_id) = &deployment.build_id {
      let build = self.db.get_build(build_id).await?;
      let image = get_image_name(&build);
      if deployment.docker_run_args.docker_account.is_none() {
        if let Some(docker_account) = &build.docker_account {
          deployment.docker_run_args.docker_account = Some(docker_account.to_string());
        }
      }
      let version = if let Some(version) = &deployment.build_version {
        version.clone()
      } else {
        build.version.clone()
      };
      deployment.docker_run_args.image = format!("{image}:{}", version.to_string());
      Some(version)
    } else {
      None
    };
    let server = self.db.get_server(&deployment.server_id).await?;
    let mut update = Update {
      target: UpdateTarget::Deployment(deployment_id.to_string()),
      operation: Operation::DeployContainer,
      start_ts,
      status: UpdateStatus::InProgress,
      operator: user.id.clone(),
      success: true,
      version,
      ..Default::default()
    };

    update.id = self.add_update(update.clone()).await?;

    let stop_signal = stop_signal.unwrap_or(deployment.termination_signal).into();
    let stop_time = stop_time.unwrap_or(deployment.termination_timeout).into();

    let deploy_log = match self
      .periphery
      .deploy(&server, &deployment, stop_signal, stop_time)
      .await
    {
      Ok(log) => log,
      Err(e) => Log::error("deploy container", format!("{e:#?}")),
    };

    update.success = deploy_log.success;
    update.logs.push(deploy_log);
    update.status = UpdateStatus::Complete;
    update.end_ts = Some(monitor_timestamp());

    self.update_update(update.clone()).await?;

    Ok(update)
  }

  pub async fn start_container(
    &self,
    deployment_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Update> {
    if self.deployment_action_states.busy(deployment_id).await {
      return Err(anyhow!("deployment busy"));
    }
    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.starting = true;
      })
      .await;

    let res = self.start_container_inner(deployment_id, user).await;

    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.starting = false;
      })
      .await;
    res
  }

  async fn start_container_inner(
    &self,
    deployment_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Update> {
    let start_ts = monitor_timestamp();
    let deployment = self
      .get_deployment_check_permissions(deployment_id, user, PermissionLevel::Execute)
      .await?;
    let server = self.db.get_server(&deployment.server_id).await?;
    let mut update = Update {
      target: UpdateTarget::Deployment(deployment_id.to_string()),
      operation: Operation::StartContainer,
      start_ts,
      status: UpdateStatus::InProgress,
      success: true,
      operator: user.id.clone(),
      ..Default::default()
    };
    update.id = self.add_update(update.clone()).await?;

    let log = self
      .periphery
      .container_start(&server, &deployment.name)
      .await;

    update.success = match log {
      Ok(log) => {
        let success = log.success;
        update.logs.push(log);
        success
      }
      Err(e) => {
        update
          .logs
          .push(Log::error("start container", format!("{e:#?}")));
        false
      }
    };

    update.end_ts = Some(monitor_timestamp());
    update.status = UpdateStatus::Complete;

    self.update_update(update.clone()).await?;

    Ok(update)
  }

  pub async fn stop_container(
    &self,
    deployment_id: &str,
    user: &RequestUser,
    stop_signal: Option<TerminationSignal>,
    stop_time: Option<i32>,
  ) -> anyhow::Result<Update> {
    if self.deployment_action_states.busy(deployment_id).await {
      return Err(anyhow!("deployment busy"));
    }
    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.stopping = true;
      })
      .await;

    let res = self
      .stop_container_inner(deployment_id, user, stop_signal, stop_time)
      .await;

    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.stopping = false;
      })
      .await;
    res
  }

  async fn stop_container_inner(
    &self,
    deployment_id: &str,
    user: &RequestUser,
    stop_signal: Option<TerminationSignal>,
    stop_time: Option<i32>,
  ) -> anyhow::Result<Update> {
    let start_ts = monitor_timestamp();
    let deployment = self
      .get_deployment_check_permissions(deployment_id, user, PermissionLevel::Execute)
      .await?;
    let server = self.db.get_server(&deployment.server_id).await?;
    let mut update = Update {
      target: UpdateTarget::Deployment(deployment_id.to_string()),
      operation: Operation::StopContainer,
      start_ts,
      status: UpdateStatus::InProgress,
      success: true,
      operator: user.id.clone(),
      ..Default::default()
    };
    update.id = self.add_update(update.clone()).await?;

    let stop_signal = stop_signal.unwrap_or(deployment.termination_signal).into();
    let stop_time = stop_time.unwrap_or(deployment.termination_timeout).into();

    let log = self
      .periphery
      .container_stop(&server, &deployment.name, stop_signal, stop_time)
      .await;

    update.success = match log {
      Ok(log) => {
        let success = log.success;
        update.logs.push(log);
        success
      }
      Err(e) => {
        update
          .logs
          .push(Log::error("stop container", format!("{e:#?}")));
        false
      }
    };

    update.end_ts = Some(monitor_timestamp());
    update.status = UpdateStatus::Complete;

    self.update_update(update.clone()).await?;

    Ok(update)
  }

  pub async fn remove_container(
    &self,
    deployment_id: &str,
    user: &RequestUser,
    stop_signal: Option<TerminationSignal>,
    stop_time: Option<i32>,
  ) -> anyhow::Result<Update> {
    if self.deployment_action_states.busy(deployment_id).await {
      return Err(anyhow!("deployment busy"));
    }
    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.removing = true;
      })
      .await;

    let res = self
      .remove_container_inner(deployment_id, user, stop_signal, stop_time)
      .await;

    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.removing = false;
      })
      .await;
    res
  }

  async fn remove_container_inner(
    &self,
    deployment_id: &str,
    user: &RequestUser,
    stop_signal: Option<TerminationSignal>,
    stop_time: Option<i32>,
  ) -> anyhow::Result<Update> {
    let start_ts = monitor_timestamp();
    let deployment = self
      .get_deployment_check_permissions(deployment_id, user, PermissionLevel::Execute)
      .await?;
    let server = self.db.get_server(&deployment.server_id).await?;
    let mut update = Update {
      target: UpdateTarget::Deployment(deployment_id.to_string()),
      operation: Operation::RemoveContainer,
      start_ts,
      status: UpdateStatus::InProgress,
      success: true,
      operator: user.id.clone(),
      ..Default::default()
    };
    update.id = self.add_update(update.clone()).await?;

    let stop_signal = stop_signal.unwrap_or(deployment.termination_signal).into();
    let stop_time = stop_time.unwrap_or(deployment.termination_timeout).into();

    let log = self
      .periphery
      .container_remove(&server, &deployment.name, stop_signal, stop_time)
      .await;

    update.success = match log {
      Ok(log) => {
        let success = log.success;
        update.logs.push(log);
        success
      }
      Err(e) => {
        update
          .logs
          .push(Log::error("remove container", format!("{e:#?}")));
        false
      }
    };

    update.end_ts = Some(monitor_timestamp());
    update.status = UpdateStatus::Complete;

    self.update_update(update.clone()).await?;

    Ok(update)
  }

  pub async fn pull_deployment_repo(
    &self,
    deployment_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Update> {
    if self.deployment_action_states.busy(deployment_id).await {
      return Err(anyhow!("deployment busy"));
    }
    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.pulling = true;
      })
      .await;

    let res = self.pull_deployment_repo_inner(deployment_id, user).await;

    self.deployment_action_states
      .update_entry(deployment_id.to_string(), |entry| {
        entry.pulling = false;
      })
      .await;
    res
  }

  async fn pull_deployment_repo_inner(
    &self,
    deployment_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Update> {
    let start_ts = monitor_timestamp();
    let deployment = self
      .get_deployment_check_permissions(deployment_id, user, PermissionLevel::Execute)
      .await?;
    let server = self.db.get_server(&deployment.server_id).await?;
    let mut update = Update {
      target: UpdateTarget::Deployment(deployment_id.to_string()),
      operation: Operation::PullDeployment,
      start_ts,
      status: UpdateStatus::InProgress,
      operator: user.id.clone(),
      success: true,
      ..Default::default()
    };

    update.id = self.add_update(update.clone()).await?;

    let logs = self
      .periphery
      .pull_repo(
        &server,
        &deployment.name,
        &deployment.branch,
        &deployment.on_pull,
      )
      .await?;

    update.success = all_logs_success(&logs);
    update.logs.extend(logs);
    update.end_ts = Some(monitor_timestamp());
    update.status = UpdateStatus::Complete;

    self.update_update(update.clone()).await?;

    Ok(update)
  }
}
@@ -1,141 +0,0 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::to_monitor_name;
use types::{
  monitor_timestamp, traits::Permissioned, Group, Log, Operation, PermissionLevel, Update,
  UpdateStatus, UpdateTarget,
};

use crate::{auth::RequestUser, state::State};

impl State {
  pub async fn get_group_check_permissions(
    &self,
    group_id: &str,
    user: &RequestUser,
    permission_level: PermissionLevel,
  ) -> anyhow::Result<Group> {
    let group = self.db.get_group(group_id).await?;
    let permissions = group.get_user_permissions(&user.id);
    if user.is_admin || permissions >= permission_level {
      Ok(group)
    } else {
      Err(anyhow!(
        "user does not have required permissions on this group"
      ))
    }
  }

  pub async fn create_group(&self, name: &str, user: &RequestUser) -> anyhow::Result<Group> {
    let start_ts = monitor_timestamp();
    let group = Group {
      name: to_monitor_name(name),
      permissions: [(user.id.clone(), PermissionLevel::Update)]
        .into_iter()
        .collect(),
      created_at: start_ts.clone(),
      updated_at: start_ts.clone(),
      ..Default::default()
    };
    let group_id = self
      .db
      .groups
      .create_one(group)
      .await
      .context("failed to add group to db")?;
    let group = self.db.get_group(&group_id).await?;
    let update = Update {
      target: UpdateTarget::Group(group_id),
      operation: Operation::CreateGroup,
      start_ts,
      end_ts: Some(monitor_timestamp()),
      operator: user.id.clone(),
      success: true,
      ..Default::default()
    };
    self.add_update(update).await?;
    Ok(group)
  }

  pub async fn create_full_group(
    &self,
    mut full_group: Group,
    user: &RequestUser,
  ) -> anyhow::Result<Group> {
    let group = self.create_group(&full_group.name, user).await?;
    full_group.id = group.id;
    let group = self.update_group(full_group, user).await?;
    Ok(group)
  }

  pub async fn delete_group(&self, id: &str, user: &RequestUser) -> anyhow::Result<Group> {
    let group = self
      .get_group_check_permissions(id, user, PermissionLevel::Update)
      .await?;
    let start_ts = monitor_timestamp();
    self.db
      .groups
      .delete_one(id)
      .await
      .context(format!("failed to delete group {id} from mongo"))?;
    let update = Update {
      target: UpdateTarget::Group(id.to_string()),
      operation: Operation::DeleteGroup,
      start_ts,
      end_ts: Some(monitor_timestamp()),
      operator: user.id.clone(),
      logs: vec![Log::simple(
        "delete group",
        format!("deleted group {}", group.name),
      )],
      success: true,
      ..Default::default()
    };
    self.add_update(update).await?;
    Ok(group)
  }

  pub async fn update_group(
    &self,
    mut new_group: Group,
    user: &RequestUser,
  ) -> anyhow::Result<Group> {
    let current_group = self
      .get_group_check_permissions(&new_group.id, user, PermissionLevel::Update)
      .await?;
    let start_ts = monitor_timestamp();

    // none of these should be changed through this method
    new_group.name = current_group.name.clone();
    new_group.permissions = current_group.permissions.clone();
    new_group.created_at = current_group.created_at.clone();
    new_group.updated_at = start_ts.clone();

    self.db
      .groups
      .update_one(&new_group.id, mungos::Update::Regular(new_group.clone()))
      .await
      .context("failed to update group")?;

    let diff = current_group.diff(&new_group);

    let update = Update {
      operation: Operation::UpdateGroup,
      target: UpdateTarget::Group(new_group.id.clone()),
      end_ts: Some(start_ts.clone()),
      start_ts,
      status: UpdateStatus::Complete,
      logs: vec![Log::simple(
        "group update",
        serde_json::to_string_pretty(&diff).unwrap(),
      )],
      operator: user.id.clone(),
      success: true,
      ..Default::default()
    };

    self.add_update(update).await?;

    Ok(new_group)
  }
}
@@ -1,44 +0,0 @@
use anyhow::Context;
use types::Update;

use crate::state::State;

mod build;
mod command;
mod deployment;
mod group;
mod procedure;
mod server;

impl State {
  pub async fn send_update(&self, update: Update) -> anyhow::Result<()> {
    self.update.sender.lock().await.send(update)?;
    Ok(())
  }

  pub async fn add_update(&self, mut update: Update) -> anyhow::Result<String> {
    update.id = self
      .db
      .updates
      .create_one(update.clone())
      .await
      .context("failed to insert update into db")?
      .to_string();
    let id = update.id.clone();
    let _ = self.send_update(update).await;
    Ok(id)
  }
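
  // `add_update` inserts the initial record and returns its id; callers then
  // mutate the in-memory `Update` (logs, status, end_ts) and persist the
  // changes with `update_update` below. The swap dance there pulls the id out
  // before serializing, apparently so the replacement document does not carry
  // its own id field, and restores it before broadcasting on the channel.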
  pub async fn update_update(&self, mut update: Update) -> anyhow::Result<()> {
    let mut update_id = String::new();
    std::mem::swap(&mut update.id, &mut update_id);
    self.db
      .updates
      .update_one(&update_id, mungos::Update::Regular(update.clone()))
      .await
      .context("failed to update the update in db. the update may have been deleted")?;
    std::mem::swap(&mut update.id, &mut update_id);
    let _ = self.send_update(update).await;
    Ok(())
  }
}
@@ -1,299 +0,0 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::to_monitor_name;
use types::{
  monitor_timestamp, traits::Permissioned, Log, Operation, PermissionLevel, Procedure,
  ProcedureOperation::*, ProcedureStage, Update, UpdateStatus, UpdateTarget,
};

use crate::{auth::RequestUser, state::State};

impl State {
  pub async fn get_procedure_check_permissions(
    &self,
    procedure_id: &str,
    user: &RequestUser,
    permission_level: PermissionLevel,
  ) -> anyhow::Result<Procedure> {
    let procedure = self.db.get_procedure(procedure_id).await?;
    let permissions = procedure.get_user_permissions(&user.id);
    if user.is_admin || permissions >= permission_level {
      Ok(procedure)
    } else {
      Err(anyhow!(
        "user does not have required permissions on this procedure"
      ))
    }
  }

  pub async fn create_procedure(
    &self,
    name: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Procedure> {
    let start_ts = monitor_timestamp();
    let procedure = Procedure {
      name: to_monitor_name(name),
      permissions: [(user.id.clone(), PermissionLevel::Update)]
        .into_iter()
        .collect(),
      created_at: start_ts.clone(),
      updated_at: start_ts.clone(),
      ..Default::default()
    };
    let procedure_id = self
      .db
      .procedures
      .create_one(procedure)
      .await
      .context("failed to add procedure to db")?;
    let procedure = self.db.get_procedure(&procedure_id).await?;
    let update = Update {
      target: UpdateTarget::Procedure(procedure_id),
      operation: Operation::CreateProcedure,
      start_ts,
      end_ts: Some(monitor_timestamp()),
      operator: user.id.clone(),
      success: true,
      ..Default::default()
    };
    self.add_update(update).await?;
    Ok(procedure)
  }

  pub async fn create_full_procedure(
    &self,
    mut full_procedure: Procedure,
    user: &RequestUser,
  ) -> anyhow::Result<Procedure> {
    let procedure = self.create_procedure(&full_procedure.name, user).await?;
    full_procedure.id = procedure.id;
    let procedure = self.update_procedure(full_procedure, user).await?;
    Ok(procedure)
  }

  pub async fn delete_procedure(
    &self,
    id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Procedure> {
    let procedure = self
      .get_procedure_check_permissions(id, user, PermissionLevel::Update)
      .await?;
    let start_ts = monitor_timestamp();
    self.db
      .procedures
      .delete_one(id)
      .await
      .context(format!("failed to delete procedure {id} from mongo"))?;
    let update = Update {
      target: UpdateTarget::Procedure(id.to_string()),
      operation: Operation::DeleteProcedure,
      start_ts,
      end_ts: Some(monitor_timestamp()),
      operator: user.id.clone(),
      logs: vec![Log::simple(
        "delete procedure",
        format!("deleted procedure {}", procedure.name),
      )],
      success: true,
      ..Default::default()
    };
    self.add_update(update).await?;
    Ok(procedure)
  }

  pub async fn update_procedure(
    &self,
    mut new_procedure: Procedure,
    user: &RequestUser,
  ) -> anyhow::Result<Procedure> {
    let current_procedure = self
      .get_procedure_check_permissions(&new_procedure.id, user, PermissionLevel::Update)
      .await?;
    let start_ts = monitor_timestamp();

    // none of these should be changed through this method
    new_procedure.name = current_procedure.name.clone();
    new_procedure.permissions = current_procedure.permissions.clone();
    new_procedure.created_at = current_procedure.created_at.clone();
    new_procedure.updated_at = start_ts.clone();

    // check that the user has execute permission on every stage target
    for ProcedureStage {
      operation,
      target_id,
    } in &new_procedure.stages
    {
      match operation {
        BuildBuild => {
          self.get_build_check_permissions(target_id, user, PermissionLevel::Execute)
            .await?;
        }
        DeployContainer | StartContainer | StopContainer | RemoveContainer
        | PullDeployment | RecloneDeployment => {
          self.get_deployment_check_permissions(
            target_id,
            user,
            PermissionLevel::Execute,
          )
          .await?;
        }
        PruneImagesServer | PruneContainersServer | PruneNetworksServer => {
          self.get_server_check_permissions(target_id, user, PermissionLevel::Execute)
            .await?;
        }
        RunProcedure => {
          self.get_procedure_check_permissions(target_id, user, PermissionLevel::Execute)
            .await?;
        }
        None => {}
      }
    }

    self.db
      .procedures
      .update_one(
        &new_procedure.id,
        mungos::Update::Regular(new_procedure.clone()),
      )
      .await
      .context("failed to update procedure")?;

    let diff = current_procedure.diff(&new_procedure);

    let update = Update {
      operation: Operation::UpdateProcedure,
      target: UpdateTarget::Procedure(new_procedure.id.clone()),
      end_ts: Some(start_ts.clone()),
      start_ts,
      status: UpdateStatus::Complete,
      logs: vec![Log::simple(
        "procedure update",
        serde_json::to_string_pretty(&diff).unwrap(),
      )],
      operator: user.id.clone(),
      success: true,
      ..Default::default()
    };

    self.add_update(update).await?;

    Ok(new_procedure)
  }

  pub async fn run_procedure(
    &self,
    procedure_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Vec<Update>> {
    let procedure = self
      .get_procedure_check_permissions(procedure_id, user, PermissionLevel::Execute)
      .await?;
    let mut updates = Vec::new();
    for ProcedureStage {
      operation,
      target_id,
    } in procedure.stages
    {
      match operation {
        None => {}
        // deployment
        StartContainer => {
          let update = self
            .start_container(&target_id, user)
            .await
            .context(format!(
              "failed at start container for deployment (id: {target_id})"
            ))?;
          updates.push(update);
        }
        StopContainer => {
          let update = self
            .stop_container(&target_id, user, Option::None, Option::None)
            .await
            .context(format!(
              "failed at stop container for deployment (id: {target_id})"
            ))?;
          updates.push(update);
        }
        RemoveContainer => {
          let update = self
            .remove_container(&target_id, user, Option::None, Option::None)
            .await
            .context(format!(
              "failed at remove container for deployment (id: {target_id})"
            ))?;
          updates.push(update);
        }
        DeployContainer => {
          let update = self
            .deploy_container(&target_id, user, Option::None, Option::None)
            .await
            .context(format!(
              "failed at deploy container for deployment (id: {target_id})"
            ))?;
          updates.push(update);
        }
        RecloneDeployment => {
          let update = self
            .reclone_deployment(&target_id, user, true)
            .await
            .context(format!("failed at reclone deployment (id: {target_id})"))?;
          updates.push(update);
        }
        PullDeployment => {
          let update = self
            .pull_deployment_repo(&target_id, user)
            .await
            .context(format!("failed at pull deployment (id: {target_id})"))?;
          updates.push(update);
        }
        // build
        BuildBuild => {
          let update = self
            .build(&target_id, user)
            .await
            .context(format!("failed at build (id: {target_id})"))?;
          updates.push(update);
        }
        // server
        PruneImagesServer => {
          let update = self.prune_images(&target_id, user).await.context(format!(
            "failed at prune images on server (id: {target_id})"
          ))?;
          updates.push(update);
        }
        PruneContainersServer => {
          let update = self
            .prune_containers(&target_id, user)
            .await
            .context(format!(
              "failed at prune containers on server (id: {target_id})"
            ))?;
          updates.push(update);
        }
        PruneNetworksServer => {
          let update = self
            .prune_networks(&target_id, user)
            .await
            .context(format!(
              "failed at prune networks on server (id: {target_id})"
            ))?;
          updates.push(update);
        }
        // procedure
        RunProcedure => {
          // need to figure out async recursion.
          // need to guard against infinite procedure loops when procedures are updated.
          // let proc_updates = self
          //   .run_procedure(&target_id, user)
          //   .await
          //   .context(format!("failed to run nested procedure (id: {target_id})"))?;
          // updates.extend(proc_updates);
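          //
          // a possible approach (untested sketch, not part of the original
          // implementation): box the recursive future so the async fn has a
          // known size, and thread a visited set through the calls to break
          // cycles between procedures, e.g.
          //
          // fn run_procedure_recursive<'a>(
          //   &'a self,
          //   procedure_id: &'a str,
          //   user: &'a RequestUser,
          //   seen: &'a mut std::collections::HashSet<String>,
          // ) -> std::pin::Pin<
          //   Box<dyn std::future::Future<Output = anyhow::Result<Vec<Update>>> + Send + 'a>,
          // > {
          //   Box::pin(async move {
          //     if !seen.insert(procedure_id.to_string()) {
          //       return Err(anyhow!("procedure loop detected (id: {procedure_id})"));
          //     }
          //     // ...run the stages as above, recursing for RunProcedure stages...
          //     Ok(Vec::new())
          //   })
          // }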
        }
      }
    }
    Ok(updates)
  }
}
@@ -1,391 +0,0 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use futures_util::future::join_all;
use mungos::mongodb::bson::doc;
use types::{
  monitor_timestamp, traits::Permissioned, Log, Operation, PermissionLevel, Server, Update,
  UpdateStatus, UpdateTarget,
};

use crate::{auth::RequestUser, state::State};

impl State {
  pub async fn get_server_check_permissions(
    &self,
    server_id: &str,
    user: &RequestUser,
    permission_level: PermissionLevel,
  ) -> anyhow::Result<Server> {
    let server = self.db.get_server(server_id).await?;
    let permissions = server.get_user_permissions(&user.id);
    if user.is_admin || permissions >= permission_level {
      Ok(server)
    } else {
      Err(anyhow!(
        "user does not have required permissions on this server"
      ))
    }
  }

  pub async fn create_server(
    &self,
    name: &str,
    address: String,
    user: &RequestUser,
  ) -> anyhow::Result<Server> {
    if !user.is_admin && !user.create_server_permissions {
      return Err(anyhow!(
        "user does not have permissions to add server (not admin)"
      ));
    }
    let start_ts = monitor_timestamp();
    let server = Server {
      name: name.to_string(),
      address,
      permissions: [(user.id.clone(), PermissionLevel::Update)]
        .into_iter()
        .collect(),
      created_at: start_ts.clone(),
      updated_at: start_ts.clone(),
      ..Default::default()
    };
    let server_id = self
      .db
      .servers
      .create_one(server)
      .await
      .context("failed to add server to db")?;
    let server = self.db.get_server(&server_id).await?;
    let update = Update {
      target: UpdateTarget::Server(server_id),
      operation: Operation::CreateServer,
      start_ts,
      end_ts: Some(monitor_timestamp()),
      operator: user.id.clone(),
      success: true,
      ..Default::default()
    };
    self.add_update(update).await?;

    Ok(server)
  }

  pub async fn create_full_server(
    &self,
    mut server: Server,
    user: &RequestUser,
  ) -> anyhow::Result<Server> {
    server.id = self
      .create_server(&server.name, server.address.clone(), user)
      .await?
      .id;
    let server = self.update_server(server, user).await?;
    Ok(server)
  }

  pub async fn delete_server(
    &self,
    server_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Server> {
    if self.server_action_states.busy(server_id).await {
      return Err(anyhow!("server busy"));
    }
    let server = self
      .get_server_check_permissions(server_id, user, PermissionLevel::Update)
      .await?;
    let start_ts = monitor_timestamp();
    let mut update = Update {
      target: UpdateTarget::Server(server_id.to_string()),
      operation: Operation::DeleteServer,
      start_ts,
      operator: user.id.clone(),
      success: true,
      status: UpdateStatus::InProgress,
      ..Default::default()
    };
    update.id = self.add_update(update.clone()).await?;

    // run the cleanup in an async block so that `?` failures are captured in
    // `res` and logged below, instead of returning from this method before
    // the update is finalized
    let res = async {
      let delete_deployments = self
        .db
        .deployments
        .get_some(doc! { "server_id": server_id }, None)
        .await?
        .into_iter()
        .map(|d| async move { self.delete_deployment(&d.id, user, None, None).await });
      let delete_builds = self
        .db
        .builds
        .get_some(doc! { "server_id": server_id }, None)
        .await?
        .into_iter()
        .map(|b| async move { self.delete_build(&b.id, user).await });
      let update_groups = self
        .db
        .groups
        .update_many(doc! {}, doc! { "$pull": { "servers": server_id } });
      let (dep_res, build_res, group_res) = tokio::join!(
        join_all(delete_deployments),
        join_all(delete_builds),
        update_groups
      );
      dep_res.into_iter().collect::<anyhow::Result<Vec<_>>>()?;
      build_res.into_iter().collect::<anyhow::Result<Vec<_>>>()?;
      group_res?;
      self.db.servers.delete_one(server_id).await?;
      anyhow::Ok(())
    }
    .await;

    let log = match res {
      Ok(_) => Log::simple("delete server", format!("deleted server {}", server.name)),
      Err(e) => Log::error("delete server", format!("failed to delete server\n{e:#?}")),
    };

    update.end_ts = Some(monitor_timestamp());
    update.status = UpdateStatus::Complete;
    update.success = log.success;
    update.logs.push(log);

    self.update_update(update).await?;
    Ok(server)
  }

  pub async fn update_server(
    &self,
    mut new_server: Server,
    user: &RequestUser,
  ) -> anyhow::Result<Server> {
    if self.server_action_states.busy(&new_server.id).await {
      return Err(anyhow!("server busy"));
    }
    let current_server = self
      .get_server_check_permissions(&new_server.id, user, PermissionLevel::Update)
      .await?;
    let start_ts = monitor_timestamp();

    new_server.permissions = current_server.permissions.clone();
    new_server.created_at = current_server.created_at.clone();
    new_server.updated_at = start_ts.clone();

    let diff = current_server.diff(&new_server);

    self.db
      .servers
      .update_one(&new_server.id, mungos::Update::Regular(new_server.clone()))
      .await
      .context("failed to update server")?;

    let update = Update {
      operation: Operation::UpdateServer,
      target: UpdateTarget::Server(new_server.id.clone()),
      start_ts,
      end_ts: Some(monitor_timestamp()),
      status: UpdateStatus::Complete,
      logs: vec![Log::simple(
        "server update",
        serde_json::to_string_pretty(&diff).unwrap(),
      )],
      operator: user.id.clone(),
      success: true,
      ..Default::default()
    };

    self.add_update(update).await?;
    Ok(new_server)
  }

  pub async fn prune_networks(
    &self,
    server_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Update> {
    if self.server_action_states.busy(server_id).await {
      return Err(anyhow!("server busy"));
    }
    self.server_action_states
      .update_entry(server_id.to_string(), |entry| {
        entry.pruning_networks = true;
      })
      .await;

    let res = self.prune_networks_inner(server_id, user).await;

    self.server_action_states
      .update_entry(server_id.to_string(), |entry| {
        entry.pruning_networks = false;
      })
      .await;
    res
  }

  async fn prune_networks_inner(
    &self,
    server_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Update> {
    let server = self
      .get_server_check_permissions(server_id, user, PermissionLevel::Execute)
      .await?;

    let start_ts = monitor_timestamp();
    let mut update = Update {
      target: UpdateTarget::Server(server_id.to_owned()),
      operation: Operation::PruneNetworksServer,
      start_ts,
      status: UpdateStatus::InProgress,
      success: true,
      operator: user.id.clone(),
      ..Default::default()
    };
    update.id = self.add_update(update.clone()).await?;

    let log = match self.periphery.network_prune(&server).await.context(format!(
      "failed to prune networks on server {}",
      server.name
    )) {
      Ok(log) => log,
      Err(e) => Log::error("prune networks", format!("{e:#?}")),
    };

    update.success = log.success;
    update.status = UpdateStatus::Complete;
    update.end_ts = Some(monitor_timestamp());
    update.logs.push(log);

    self.update_update(update.clone()).await?;

    Ok(update)
  }

  pub async fn prune_images(
    &self,
    server_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Update> {
    if self.server_action_states.busy(server_id).await {
      return Err(anyhow!("server busy"));
    }
    self.server_action_states
      .update_entry(server_id.to_string(), |entry| {
        entry.pruning_images = true;
      })
      .await;

    let res = self.prune_images_inner(server_id, user).await;

    self.server_action_states
      .update_entry(server_id.to_string(), |entry| {
        entry.pruning_images = false;
      })
      .await;
    res
  }

  async fn prune_images_inner(
    &self,
    server_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Update> {
    let server = self
      .get_server_check_permissions(server_id, user, PermissionLevel::Execute)
      .await?;
    let start_ts = monitor_timestamp();
    let mut update = Update {
      target: UpdateTarget::Server(server_id.to_owned()),
      operation: Operation::PruneImagesServer,
      start_ts,
      status: UpdateStatus::InProgress,
      success: true,
      operator: user.id.clone(),
      ..Default::default()
    };
    update.id = self.add_update(update.clone()).await?;

    let log = match self
      .periphery
      .image_prune(&server)
      .await
      .context(format!("failed to prune images on server {}", server.name))
    {
      Ok(log) => log,
      Err(e) => Log::error("prune images", format!("{e:#?}")),
    };

    update.success = log.success;
    update.status = UpdateStatus::Complete;
    update.end_ts = Some(monitor_timestamp());
    update.logs.push(log);

    self.update_update(update.clone()).await?;

    Ok(update)
  }

  pub async fn prune_containers(
    &self,
    server_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Update> {
    if self.server_action_states.busy(server_id).await {
      return Err(anyhow!("server busy"));
    }
    self.server_action_states
      .update_entry(server_id.to_string(), |entry| {
        entry.pruning_containers = true;
      })
      .await;

    let res = self.prune_containers_inner(server_id, user).await;

    self.server_action_states
      .update_entry(server_id.to_string(), |entry| {
        entry.pruning_containers = false;
      })
      .await;
    res
  }

  async fn prune_containers_inner(
    &self,
    server_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Update> {
    let server = self
      .get_server_check_permissions(server_id, user, PermissionLevel::Execute)
      .await?;

    let start_ts = monitor_timestamp();
    let mut update = Update {
      target: UpdateTarget::Server(server_id.to_owned()),
      operation: Operation::PruneContainersServer,
      start_ts,
      status: UpdateStatus::InProgress,
      success: true,
      operator: user.id.clone(),
      ..Default::default()
    };
    update.id = self.add_update(update.clone()).await?;

    let log = match self
      .periphery
      .container_prune(&server)
      .await
      .context(format!(
        "failed to prune containers on server {}",
        server.name
      )) {
      Ok(log) => log,
      Err(e) => Log::error("prune containers", format!("{e:#?}")),
    };

    update.success = log.success;
    update.status = UpdateStatus::Complete;
    update.end_ts = Some(monitor_timestamp());
    update.logs.push(log);

    self.update_update(update.clone()).await?;

    Ok(update)
  }
}
@@ -1,435 +0,0 @@
use std::{cmp::Ordering, collections::HashMap};

use anyhow::Context;
use async_timing_util::unix_timestamp_ms;
use axum::{
  extract::{Path, Query},
  routing::{delete, get, patch, post},
  Extension, Json, Router,
};
use futures_util::TryStreamExt;
use helpers::handle_anyhow_error;
use mungos::mongodb::{
  bson::{doc, Document},
  options::FindOptions,
};
use serde::{Deserialize, Serialize};
use types::{
  monitor_ts_from_unix, traits::Permissioned, unix_from_monitor_ts, AwsBuilderConfig, Build,
  BuildActionState, BuildVersionsReponse, Operation, PermissionLevel, UpdateStatus,
};
use typeshare::typeshare;

const NUM_VERSIONS_PER_PAGE: u64 = 10;
const ONE_DAY_MS: i64 = 86400000;

use crate::{
  auth::{RequestUser, RequestUserExtension},
  response,
  state::{State, StateExtension},
};

use super::spawn_request_action;
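
// `spawn_request_action` (defined in the parent module) wraps the
// long-running handlers below. Judging by its use with the double `?` on the
// join result and the action result, it appears to run the action on a
// detached task so it can complete even if the requesting client disconnects;
// this is an inference from usage, not from the function's definition.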

#[derive(Serialize, Deserialize)]
struct BuildId {
  id: String,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
struct CreateBuildBody {
  name: String,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
struct CopyBuildBody {
  name: String,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct BuildVersionsQuery {
  #[serde(default)]
  page: u32,
  major: Option<i32>,
  minor: Option<i32>,
  patch: Option<i32>,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct BuildStatsQuery {
  #[serde(default)]
  page: u32,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct BuildStatsResponse {
  pub total_time: f64,  // in hours
  pub total_count: f64, // number of builds
  pub days: Vec<BuildStatsDay>,
}

#[typeshare]
#[derive(Serialize, Deserialize, Default)]
pub struct BuildStatsDay {
  pub time: f64,
  pub count: f64,
  pub ts: f64,
}

pub fn router() -> Router {
  Router::new()
    .route(
      "/:id",
      get(
        |state: StateExtension,
         Extension(user): RequestUserExtension,
         Path(build_id): Path<BuildId>| async move {
          let build = state
            .get_build_check_permissions(&build_id.id, &user, PermissionLevel::Read)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(build))
        },
      ),
    )
    .route(
      "/list",
      get(
        |state: StateExtension,
         Extension(user): RequestUserExtension,
         Query(query): Query<Document>| async move {
          let builds = state
            .list_builds(&user, query)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(builds))
        },
      ),
    )
    .route(
      "/create",
      post(
        |state: StateExtension,
         Extension(user): RequestUserExtension,
         Json(build): Json<CreateBuildBody>| async move {
          let build = state
            .create_build(&build.name, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(build))
        },
      ),
    )
    .route(
      "/create_full",
      post(
        |state: StateExtension,
         Extension(user): RequestUserExtension,
         Json(build): Json<Build>| async move {
          let build = spawn_request_action(async move {
            state
              .create_full_build(build, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(build))
        },
      ),
    )
    .route(
      "/:id/copy",
      post(
        |state: StateExtension,
         Extension(user): RequestUserExtension,
         Path(BuildId { id }): Path<BuildId>,
         Json(build): Json<CopyBuildBody>| async move {
          let build = spawn_request_action(async move {
            state
              .copy_build(&id, build.name, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(build))
        },
      ),
    )
    .route(
      "/:id/delete",
      delete(
        |state: StateExtension,
         Extension(user): RequestUserExtension,
         Path(build_id): Path<BuildId>| async move {
          let build = spawn_request_action(async move {
            state
              .delete_build(&build_id.id, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(build))
        },
      ),
    )
    .route(
      "/update",
      patch(
        |state: StateExtension,
         Extension(user): RequestUserExtension,
         Json(build): Json<Build>| async move {
          let build = spawn_request_action(async move {
            state
              .update_build(build, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(build))
        },
      ),
    )
    .route(
      "/:id/build",
      post(
        |state: StateExtension,
         Extension(user): RequestUserExtension,
         Path(build_id): Path<BuildId>| async move {
          let update = spawn_request_action(async move {
            state
              .build(&build_id.id, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(update))
        },
      ),
    )
    .route(
      "/:id/action_state",
      get(
        |state: StateExtension,
         Extension(user): RequestUserExtension,
         Path(BuildId { id }): Path<BuildId>| async move {
          let action_state = state
            .get_build_action_states(id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(action_state))
        },
      ),
    )
    .route(
      "/:id/versions",
      get(
        |state: StateExtension,
         Extension(user): RequestUserExtension,
         Path(BuildId { id }): Path<BuildId>,
         Query(query): Query<BuildVersionsQuery>| async move {
          let versions = state
            .get_build_versions(&id, &user, query)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(versions))
        },
      ),
    )
    .route(
      "/aws_builder_defaults",
      get(|state: StateExtension| async move {
Json(AwsBuilderConfig {
|
||||
access_key_id: String::new(),
|
||||
secret_access_key: String::new(),
|
||||
..state.config.aws.clone()
|
||||
})
|
||||
}),
|
||||
)
|
||||
.route(
|
||||
"/docker_organizations",
|
||||
get(|state: StateExtension| async move {
|
||||
Json(state.config.docker_organizations.clone())
|
||||
}),
|
||||
)
|
||||
.route(
|
||||
"/stats",
|
||||
get(|state: StateExtension, query: Query<BuildStatsQuery>| async move {
|
||||
let stats = state.get_build_stats(query.page).await.map_err(handle_anyhow_error)?;
|
||||
response!(Json(stats))
|
||||
}),
|
||||
)
|
||||
}
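
// This router is mounted with .nest("/build", build::router()) in the api
// module later in this commit, so the versions handler above answers e.g.
// GET /build/<id>/versions?page=0&major=1 relative to wherever the api router
// itself is mounted (any further prefix is outside this diff).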

impl State {
  async fn list_builds(
    &self,
    user: &RequestUser,
    query: impl Into<Option<Document>>,
  ) -> anyhow::Result<Vec<Build>> {
    let builds: Vec<Build> = self
      .db
      .builds
      .get_some(query, None)
      .await
      .context("failed at get all builds query")?
      .into_iter()
      .filter(|s| {
        if user.is_admin {
          true
        } else {
          let permissions = s.get_user_permissions(&user.id);
          permissions != PermissionLevel::None
        }
      })
      .collect();
    Ok(builds)
  }

  async fn get_build_action_states(
    &self,
    id: String,
    user: &RequestUser,
  ) -> anyhow::Result<BuildActionState> {
    self.get_build_check_permissions(&id, &user, PermissionLevel::Read)
      .await?;
    let action_state = self.build_action_states.get_or_default(id).await;
    Ok(action_state)
  }

  async fn get_build_versions(
    &self,
    id: &str,
    user: &RequestUser,
    query: BuildVersionsQuery,
  ) -> anyhow::Result<Vec<BuildVersionsReponse>> {
    self.get_build_check_permissions(&id, user, PermissionLevel::Read)
      .await?;
    let mut filter = doc! {
      "target": {
        "type": "Build",
        "id": id
      },
      "operation": Operation::BuildBuild.to_string(),
      "status": UpdateStatus::Complete.to_string(),
      "success": true
    };
    if let Some(major) = query.major {
      filter.insert("version.major", major);
    }
    if let Some(minor) = query.minor {
      filter.insert("version.minor", minor);
    }
    if let Some(patch) = query.patch {
      filter.insert("version.patch", patch);
    }
    let versions = self
      .db
      .updates
      .get_some(
        filter,
        FindOptions::builder()
          .sort(doc! { "_id": -1 })
          .limit(NUM_VERSIONS_PER_PAGE as i64)
          .skip(query.page as u64 * NUM_VERSIONS_PER_PAGE)
          .build(),
      )
      .await
      .context("failed to pull versions from mongo")?
      .into_iter()
      .map(|u| (u.version, u.start_ts))
      .filter(|(v, _)| v.is_some())
      .map(|(v, ts)| BuildVersionsReponse {
        version: v.unwrap(),
        ts,
      })
      .collect();
    Ok(versions)
  }

  async fn get_build_stats(&self, page: u32) -> anyhow::Result<BuildStatsResponse> {
    let curr_ts = unix_timestamp_ms() as i64;
    let next_day = curr_ts - curr_ts % ONE_DAY_MS + ONE_DAY_MS;

    let close_ts = next_day - page as i64 * 30 * ONE_DAY_MS;
    let open_ts = close_ts - 30 * ONE_DAY_MS;

    let mut build_updates = self
      .db
      .updates
      .collection
      .find(
        doc! {
          "start_ts": {
            "$gte": monitor_ts_from_unix(open_ts)
              .context("open_ts out of bounds")?,
            "$lt": monitor_ts_from_unix(close_ts)
              .context("close_ts out of bounds")?
          },
          "operation": Operation::BuildBuild.to_string(),
        },
        None,
      )
      .await?;

    let mut days = HashMap::<i64, BuildStatsDay>::with_capacity(32);

    let mut curr = open_ts;

    while curr < close_ts {
      let stats = BuildStatsDay {
        ts: curr as f64,
        ..Default::default()
      };
      days.insert(curr, stats);
      curr += ONE_DAY_MS;
    }

    while let Some(update) = build_updates.try_next().await? {
      if let Some(end_ts) = update.end_ts {
        let start_ts = unix_from_monitor_ts(&update.start_ts)
          .context("failed to parse update start_ts")?;
        let end_ts =
          unix_from_monitor_ts(&end_ts).context("failed to parse update end_ts")?;
        let day = start_ts - start_ts % ONE_DAY_MS;
        let entry = days.entry(day).or_default();
        entry.count += 1.0;
        entry.time += ms_to_hour(end_ts - start_ts);
      }
    }

    Ok(BuildStatsResponse::new(days.into_values().collect()))
  }
}

impl BuildStatsResponse {
  fn new(mut days: Vec<BuildStatsDay>) -> BuildStatsResponse {
    days.sort_by(|a, b| {
      if a.ts < b.ts {
        Ordering::Less
      } else {
        Ordering::Greater
      }
    });
    let mut total_time = 0.0;
    let mut total_count = 0.0;
    for day in &days {
      total_time += day.time;
      total_count += day.count;
    }
    BuildStatsResponse {
      total_time,
      total_count,
      days,
    }
  }
}

const MS_TO_HOUR_DIVISOR: f64 = 1000.0 * 60.0 * 60.0;
fn ms_to_hour(duration: i64) -> f64 {
  duration as f64 / MS_TO_HOUR_DIVISOR
}
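
// A minimal sanity check of the time math above (hypothetical test module,
// not part of the original file): 5_400_000 ms is exactly 1.5 hours, and
// `ts - ts % ONE_DAY_MS` snaps a millisecond timestamp to the start of its day.
#[cfg(test)]
mod time_math_tests {
  use super::{ms_to_hour, ONE_DAY_MS};

  #[test]
  fn converts_ms_to_hours() {
    assert_eq!(ms_to_hour(5_400_000), 1.5);
  }

  #[test]
  fn floors_to_day_boundary() {
    let ts: i64 = 3 * ONE_DAY_MS + 123_456;
    assert_eq!(ts - ts % ONE_DAY_MS, 3 * ONE_DAY_MS);
  }
}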
@@ -1,220 +0,0 @@
use anyhow::Context;
use axum::{
  extract::{Path, Query},
  routing::{delete, get, patch, post},
  Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::mongodb::bson::Document;
use serde::{Deserialize, Serialize};
use types::{traits::Permissioned, CommandActionState, PeripheryCommand, PermissionLevel};
use typeshare::typeshare;

use crate::{
  api::spawn_request_action,
  auth::{RequestUser, RequestUserExtension},
  response,
  state::{State, StateExtension},
};

#[derive(Serialize, Deserialize)]
pub struct CommandId {
  id: String,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CreateCommandBody {
  name: String,
  server_id: String,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CopyCommandBody {
  name: String,
  server_id: String,
}

pub fn router() -> Router {
  Router::new()
    .route(
      "/:id",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(CommandId { id })| async move {
          let command = state
            .get_command_check_permissions(&id, &user, PermissionLevel::Read)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(command))
        },
      ),
    )
    .route(
      "/list",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Query(query): Query<Document>| async move {
          let commands = state
            .list_commands(&user, query)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(commands))
        },
      ),
    )
    .route(
      "/create",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Json(command): Json<CreateCommandBody>| async move {
          let command = state
            .create_command(&command.name, command.server_id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(command))
        },
      ),
    )
    .route(
      "/create_full",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Json(command): Json<PeripheryCommand>| async move {
          let command = spawn_request_action(async move {
            state
              .create_full_command(command, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(command))
        },
      ),
    )
    .route(
      "/:id/copy",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(CommandId { id }),
         Json(command): Json<CopyCommandBody>| async move {
          let command = spawn_request_action(async move {
            state
              .copy_command(&id, command.name, command.server_id, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(command))
        },
      ),
    )
    .route(
      "/:id/delete",
      delete(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(CommandId { id })| async move {
          let command = spawn_request_action(async move {
            state
              .delete_command(&id, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(command))
        },
      ),
    )
    .route(
      "/update",
      patch(
        |state: StateExtension,
         user: RequestUserExtension,
         Json(command): Json<PeripheryCommand>| async move {
          let command = spawn_request_action(async move {
            state
              .update_command(command, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(command))
        },
      ),
    )
    .route(
      "/:id/action_state",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(CommandId { id })| async move {
          let action_state = state
            .get_command_action_states(id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(action_state))
        },
      ),
    )
    .route(
      "/:id/run",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(CommandId { id })| async move {
          let update = spawn_request_action(async move {
            state
              .run_command(&id, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(update))
        },
      ),
    )
}

impl State {
  async fn list_commands(
    &self,
    user: &RequestUser,
    query: impl Into<Option<Document>>,
  ) -> anyhow::Result<Vec<PeripheryCommand>> {
    let commands: Vec<PeripheryCommand> = self
      .db
      .commands
      .get_some(query, None)
      .await
      .context("failed at get all commands query")?
      .into_iter()
      .filter(|s| {
        if user.is_admin {
          true
        } else {
          let permissions = s.get_user_permissions(&user.id);
          permissions != PermissionLevel::None
        }
      })
      .collect();
    Ok(commands)
  }

  async fn get_command_action_states(
    &self,
    id: String,
    user: &RequestUser,
  ) -> anyhow::Result<CommandActionState> {
    self.get_command_check_permissions(&id, &user, PermissionLevel::Read)
      .await?;
    let action_state = self.command_action_states.get_or_default(id).await;
    Ok(action_state)
  }
}
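
// The admin-or-permissioned filter in list_commands is repeated verbatim in
// the build, deployment, and group list handlers in this commit. A sketch of
// how it could be shared, assuming only the Permissioned trait already in use
// here (hypothetical helper, not part of the original code):
//
//   fn visible_to<T: Permissioned>(items: Vec<T>, user: &RequestUser) -> Vec<T> {
//     items
//       .into_iter()
//       .filter(|item| {
//         user.is_admin || item.get_user_permissions(&user.id) != PermissionLevel::None
//       })
//       .collect()
//   }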
@@ -1,543 +0,0 @@
use std::collections::HashMap;

use anyhow::Context;
use axum::{
  extract::{Path, Query},
  routing::{delete, get, patch, post},
  Json, Router,
};
use futures_util::future::join_all;
use helpers::handle_anyhow_error;
use mungos::mongodb::{
  bson::{doc, Document},
  options::FindOneOptions,
};
use serde::{Deserialize, Serialize};
use types::{
  traits::Permissioned, Deployment, DeploymentActionState, DeploymentWithContainerState,
  DockerContainerState, DockerContainerStats, Log, Operation, PermissionLevel, Server,
  TerminationSignal, UpdateStatus,
};
use typeshare::typeshare;

use crate::{
  auth::{RequestUser, RequestUserExtension},
  response,
  state::{State, StateExtension},
};

use super::spawn_request_action;

#[derive(Serialize, Deserialize)]
pub struct DeploymentId {
  id: String,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CreateDeploymentBody {
  name: String,
  server_id: String,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CopyDeploymentBody {
  name: String,
  server_id: String,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct RenameDeploymentBody {
  new_name: String,
}

#[typeshare]
#[derive(Deserialize)]
pub struct GetContainerLogQuery {
  tail: Option<u32>,
}

#[typeshare]
#[derive(Deserialize)]
pub struct StopContainerQuery {
  stop_signal: Option<TerminationSignal>,
  stop_time: Option<i32>,
}

pub fn router() -> Router {
  Router::new()
    .route(
      "/:id",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(DeploymentId { id })| async move {
          let res = state
            .get_deployment_with_container_state(&user, &id)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(res))
        },
      ),
    )
    .route(
      "/list",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Query(query): Query<Document>| async move {
          let deployments = state
            .list_deployments_with_container_state(&user, query)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(deployments))
        },
      ),
    )
    .route(
      "/create",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Json(deployment): Json<CreateDeploymentBody>| async move {
          let deployment = state
            .create_deployment(&deployment.name, deployment.server_id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(deployment))
        },
      ),
    )
    .route(
      "/create_full",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Json(full_deployment): Json<Deployment>| async move {
          let deployment = spawn_request_action(async move {
            state
              .create_full_deployment(full_deployment, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(deployment))
        },
      ),
    )
    .route(
      "/:id/copy",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(DeploymentId { id }),
         Json(deployment): Json<CopyDeploymentBody>| async move {
          let deployment = spawn_request_action(async move {
            state
              .copy_deployment(&id, deployment.name, deployment.server_id, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(deployment))
        },
      ),
    )
    .route(
      "/:id/delete",
      delete(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(DeploymentId { id }),
         Query(StopContainerQuery { stop_signal, stop_time })| async move {
          let deployment = spawn_request_action(async move {
            state
              .delete_deployment(&id, &user, stop_signal, stop_time)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(deployment))
        },
      ),
    )
    .route(
      "/update",
      patch(
        |state: StateExtension,
         user: RequestUserExtension,
         Json(deployment): Json<Deployment>| async move {
          let deployment = spawn_request_action(async move {
            state
              .update_deployment(deployment, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(deployment))
        },
      ),
    )
    .route(
      "/:id/rename",
      patch(
        |state: StateExtension,
         user: RequestUserExtension,
         deployment: Path<DeploymentId>,
         body: Json<RenameDeploymentBody>| async move {
          let update = spawn_request_action(async move {
            state
              .rename_deployment(&deployment.id, &body.new_name, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(update))
        },
      ),
    )
    .route(
      "/:id/reclone",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(DeploymentId { id })| async move {
          let update = spawn_request_action(async move {
            state
              .reclone_deployment(&id, &user, true)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(update))
        },
      ),
    )
    .route(
      "/:id/deploy",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(DeploymentId { id }),
         Query(StopContainerQuery { stop_signal, stop_time })| async move {
          let update = spawn_request_action(async move {
            state
              .deploy_container(&id, &user, stop_signal, stop_time)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(update))
        },
      ),
    )
    .route(
      "/:id/start_container",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(DeploymentId { id })| async move {
          let update = spawn_request_action(async move {
            state
              .start_container(&id, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(update))
        },
      ),
    )
    .route(
      "/:id/stop_container",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(DeploymentId { id }),
         Query(StopContainerQuery { stop_signal, stop_time })| async move {
          let update = spawn_request_action(async move {
            state
              .stop_container(&id, &user, stop_signal, stop_time)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(update))
        },
      ),
    )
    .route(
      "/:id/remove_container",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(DeploymentId { id }),
         Query(StopContainerQuery { stop_signal, stop_time })| async move {
          let update = spawn_request_action(async move {
            state
              .remove_container(&id, &user, stop_signal, stop_time)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(update))
        },
      ),
    )
    .route(
      "/:id/pull",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(DeploymentId { id })| async move {
          let update = spawn_request_action(async move {
            state
              .pull_deployment_repo(&id, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(update))
        },
      ),
    )
    .route(
      "/:id/action_state",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(DeploymentId { id }): Path<DeploymentId>| async move {
          let action_state = state
            .get_deployment_action_states(id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(action_state))
        },
      ),
    )
    .route(
      "/:id/log",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(DeploymentId { id }),
         Query(query): Query<GetContainerLogQuery>| async move {
          let log = state
            .get_deployment_container_log(&id, &user, query.tail)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(log))
        },
      ),
    )
    .route(
      "/:id/stats",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(DeploymentId { id })| async move {
          let stats = state
            .get_deployment_container_stats(&id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(stats))
        },
      ),
    )
    .route(
      "/:id/deployed_version",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(DeploymentId { id })| async move {
          let version = state
            .get_deployment_deployed_version(&id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(version)
        },
      ),
    )
}

impl State {
  pub async fn get_deployment_with_container_state(
    &self,
    user: &RequestUser,
    id: &str,
  ) -> anyhow::Result<DeploymentWithContainerState> {
    let deployment = self
      .get_deployment_check_permissions(id, user, PermissionLevel::Read)
      .await?;
    let server = self.db.get_server(&deployment.server_id).await?;
    let (state, container) = match self.periphery.container_list(&server).await {
      Ok(containers) => match containers.into_iter().find(|c| c.name == deployment.name) {
        Some(container) => (container.state, Some(container)),
        None => (DockerContainerState::NotDeployed, None),
      },
      Err(_) => (DockerContainerState::Unknown, None),
    };
    Ok(DeploymentWithContainerState {
      deployment,
      state,
      container,
    })
  }

  async fn list_deployments_with_container_state(
    &self,
    user: &RequestUser,
    query: impl Into<Option<Document>>,
  ) -> anyhow::Result<Vec<DeploymentWithContainerState>> {
    let deployments: Vec<Deployment> = self
      .db
      .deployments
      .get_some(query, None)
      .await
      .context("failed at get all deployments query")?
      .into_iter()
      .filter(|s| {
        if user.is_admin {
          true
        } else {
          let permissions = s.get_user_permissions(&user.id);
          permissions != PermissionLevel::None
        }
      })
      .collect();
    let mut servers: Vec<Server> = Vec::new();
    for d in &deployments {
      if !servers.iter().any(|s| s.id == d.server_id) {
        servers.push(self.db.get_server(&d.server_id).await?)
      }
    }
    let containers_futures = servers
      .into_iter()
      .map(|server| async { (self.periphery.container_list(&server).await, server.id) });

    let containers = join_all(containers_futures)
      .await
      .into_iter()
      .map(|(container, server_id)| (server_id, container.ok()))
      .collect::<HashMap<_, _>>();
    let deployments_with_containers = deployments
      .into_iter()
      .map(|deployment| {
        let (state, container) = match containers.get(&deployment.server_id).unwrap() {
          Some(container) => {
            match container
              .iter()
              .find(|c| c.name == deployment.name)
              .map(|c| c.to_owned())
            {
              Some(container) => (container.state, Some(container)),
              None => (DockerContainerState::NotDeployed, None),
            }
          }
          None => (DockerContainerState::Unknown, None),
        };
        DeploymentWithContainerState {
          container,
          deployment,
          state,
        }
      })
      .collect::<Vec<DeploymentWithContainerState>>();
    Ok(deployments_with_containers)
  }

  async fn get_deployment_action_states(
    &self,
    id: String,
    user: &RequestUser,
  ) -> anyhow::Result<DeploymentActionState> {
    self.get_deployment_check_permissions(&id, &user, PermissionLevel::Read)
      .await?;
    let action_state = self.deployment_action_states.get_or_default(id).await;
    Ok(action_state)
  }

  async fn get_deployment_container_log(
    &self,
    id: &str,
    user: &RequestUser,
    tail: Option<u32>,
  ) -> anyhow::Result<Log> {
    let deployment = self
      .get_deployment_check_permissions(&id, &user, PermissionLevel::Read)
      .await?;
    let server = self.db.get_server(&deployment.server_id).await?;
    let log = self
      .periphery
      .container_log(&server, &deployment.name, tail)
      .await?;
    Ok(log)
  }

  async fn get_deployment_container_stats(
    &self,
    id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<DockerContainerStats> {
    let deployment = self
      .get_deployment_check_permissions(&id, &user, PermissionLevel::Read)
      .await?;
    let server = self.db.get_server(&deployment.server_id).await?;
    let stats = self
      .periphery
      .container_stats(&server, &deployment.name)
      .await?;
    Ok(stats)
  }

  async fn get_deployment_deployed_version(
    &self,
    id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<String> {
    let deployment = self
      .get_deployment_check_permissions(&id, &user, PermissionLevel::Read)
      .await?;
    if deployment.build_id.is_some() {
      let latest_deploy_update = self
        .db
        .updates
        .find_one(
          doc! {
            "target": {
              "type": "Deployment",
              "id": id
            },
            "operation": Operation::DeployContainer.to_string(),
            "status": UpdateStatus::Complete.to_string(),
            "success": true,
          },
          FindOneOptions::builder().sort(doc! { "_id": -1 }).build(),
        )
        .await
        .context("failed at query to get latest deploy update from mongo")?;
      if let Some(update) = latest_deploy_update {
        if let Some(version) = update.version {
          Ok(version.to_string())
        } else {
          Ok("unknown".to_string())
        }
      } else {
        Ok("unknown".to_string())
      }
    } else {
      let split = deployment
        .docker_run_args
        .image
        .split(':')
        .collect::<Vec<&str>>();
      if let Some(version) = split.get(1) {
        Ok(version.to_string())
      } else {
        Ok("unknown".to_string())
      }
    }
  }
}
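
// Note on get_deployment_deployed_version: when the deployment has no build_id,
// the version is whatever follows the first ':' in the image name, so
// "mongo:4.4" yields "4.4" and a bare "mongo" yields "unknown". An image from a
// registry with a port, e.g. "registry.local:5000/mongo", would yield
// "5000/mongo" here; handling that would mean splitting on the last ':' after
// the final '/'.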
@@ -1,176 +0,0 @@
use anyhow::{anyhow, Context};
use axum::{extract::Path, http::HeaderMap, routing::post, Router};
use axum_oauth2::random_duration;
use helpers::handle_anyhow_error;
use hex::ToHex;
use hmac::{Hmac, Mac};
use serde::Deserialize;
use sha2::Sha256;
use types::GITHUB_WEBHOOK_USER_ID;

use crate::{
  auth::RequestUser,
  state::{State, StateExtension},
};

use super::spawn_request_action;

type HmacSha256 = Hmac<Sha256>;

#[derive(Deserialize)]
struct Id {
  id: String,
}

pub fn router() -> Router {
  Router::new()
    .route(
      "/build/:id",
      post(
        |state: StateExtension, Path(Id { id }), headers: HeaderMap, body: String| async move {
          spawn_request_action(async move {
            state
              .handle_build_webhook(&id, headers, body)
              .await
              .map_err(handle_anyhow_error)
          })
          .await?
        },
      ),
    )
    .route(
      "/deployment/:id",
      post(
        |state: StateExtension, Path(Id { id }), headers: HeaderMap, body: String| async move {
          spawn_request_action(async move {
            state
              .handle_deployment_webhook(&id, headers, body)
              .await
              .map_err(handle_anyhow_error)
          })
          .await?
        },
      ),
    )
    .route(
      "/procedure/:id",
      post(
        |state: StateExtension, Path(Id { id }), headers: HeaderMap, body: String| async move {
          spawn_request_action(async move {
            state
              .handle_procedure_webhook(&id, headers, body)
              .await
              .map_err(handle_anyhow_error)
          })
          .await?
        },
      ),
    )
}

impl State {
  async fn handle_build_webhook(
    &self,
    id: &str,
    headers: HeaderMap,
    body: String,
  ) -> anyhow::Result<()> {
    self.verify_gh_signature(headers, &body).await?;
    let request_branch = extract_branch(&body)?;
    let expected_branch = self
      .db
      .get_build(id)
      .await?
      .branch
      .ok_or(anyhow!("build has no branch attached"))?;
    if request_branch != expected_branch {
      return Err(anyhow!("request branch does not match expected"));
    }
    self.build(
      id,
      &RequestUser {
        id: String::from(GITHUB_WEBHOOK_USER_ID),
        is_admin: true,
        create_server_permissions: false,
        create_build_permissions: false,
      },
    )
    .await?;
    Ok(())
  }

  async fn handle_deployment_webhook(
    &self,
    id: &str,
    headers: HeaderMap,
    body: String,
  ) -> anyhow::Result<()> {
    self.verify_gh_signature(headers, &body).await?;
    let request_branch = extract_branch(&body)?;
    let expected_branch = self
      .db
      .get_deployment(id)
      .await?
      .branch
      .ok_or(anyhow!("deployment has no branch attached"))?;
    if request_branch != expected_branch {
      return Err(anyhow!("request branch does not match expected"));
    }
    self.pull_deployment_repo(
      id,
      &RequestUser {
        id: String::from(GITHUB_WEBHOOK_USER_ID),
        is_admin: true,
        create_server_permissions: false,
        create_build_permissions: false,
      },
    )
    .await?;
    Ok(())
  }

  async fn handle_procedure_webhook(
    &self,
    id: &str,
    headers: HeaderMap,
    body: String,
  ) -> anyhow::Result<()> {
    self.verify_gh_signature(headers, &body).await?;
    let request_branch = extract_branch(&body)?;
    let expected_branches = self.db.get_procedure(id).await?.webhook_branches;
    if !expected_branches.contains(&request_branch) {
      return Err(anyhow!("request branch does not match expected"));
    }
    self.run_procedure(
      id,
      &RequestUser {
        id: String::from(GITHUB_WEBHOOK_USER_ID),
        is_admin: true,
        create_server_permissions: false,
        create_build_permissions: false,
      },
    )
    .await?;
    Ok(())
  }

  async fn verify_gh_signature(&self, headers: HeaderMap, body: &str) -> anyhow::Result<()> {
    // wait a random amount of time to make timing the comparison below harder
    tokio::time::sleep(random_duration(0, 500)).await;

    let signature = headers
      .get("x-hub-signature-256")
      .context("no signature in headers")?
      .to_str()
      .context("signature header is not valid utf-8")?
      .replace("sha256=", "");
    let mut mac = HmacSha256::new_from_slice(self.config.github_webhook_secret.as_bytes())
      .expect("github webhook | failed to create hmac sha256");
    mac.update(body.as_bytes());
    let expected = mac.finalize().into_bytes().encode_hex::<String>();
    if signature == expected {
      Ok(())
    } else {
      Err(anyhow!("signature does not equal expected"))
    }
  }
}

#[derive(Deserialize)]
struct GithubWebhookBody {
  #[serde(rename = "ref")]
  branch: String,
}

fn extract_branch(body: &str) -> anyhow::Result<String> {
  let branch = serde_json::from_str::<GithubWebhookBody>(body)
    .context("failed to parse github request body")?
    .branch
    .replace("refs/heads/", "");
  Ok(branch)
}
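
// GitHub signs the raw request body with the shared webhook secret and sends
// "x-hub-signature-256: sha256=<hex>"; verify_gh_signature recomputes the HMAC
// and compares, with the random sleep blunting timing probes against the plain
// string comparison. A standalone sketch of the same computation, with an
// assumed secret and body (same hmac/sha2/hex crates as above):
//
//   let mut mac = HmacSha256::new_from_slice(b"my-webhook-secret")
//     .expect("any key length is valid for hmac");
//   mac.update(br#"{"ref":"refs/heads/main"}"#);
//   let hex_digest = mac.finalize().into_bytes().encode_hex::<String>();
//   // the header to match would then be: format!("sha256={hex_digest}")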
@@ -1,143 +0,0 @@
use anyhow::Context;
use axum::{
  extract::{Path, Query},
  routing::{delete, get, patch, post},
  Extension, Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::mongodb::bson::Document;
use serde::{Deserialize, Serialize};
use types::{traits::Permissioned, Group, PermissionLevel};
use typeshare::typeshare;

use crate::{
  auth::{RequestUser, RequestUserExtension},
  response,
  state::{State, StateExtension},
};

#[derive(Serialize, Deserialize)]
pub struct GroupId {
  id: String,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CreateGroupBody {
  name: String,
}

pub fn router() -> Router {
  Router::new()
    .route(
      "/:id",
      get(
        |Extension(state): StateExtension,
         Extension(user): RequestUserExtension,
         Path(group_id): Path<GroupId>| async move {
          let group = state
            .get_group_check_permissions(&group_id.id, &user, PermissionLevel::Read)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(group))
        },
      ),
    )
    .route(
      "/list",
      get(
        |Extension(state): StateExtension,
         Extension(user): RequestUserExtension,
         Query(query): Query<Document>| async move {
          let groups = state
            .list_groups(&user, query)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(groups))
        },
      ),
    )
    .route(
      "/create",
      post(
        |Extension(state): StateExtension,
         Extension(user): RequestUserExtension,
         Json(group): Json<CreateGroupBody>| async move {
          let group = state
            .create_group(&group.name, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(group))
        },
      ),
    )
    .route(
      "/create_full",
      post(
        |Extension(state): StateExtension,
         Extension(user): RequestUserExtension,
         Json(group): Json<Group>| async move {
          let group = state
            .create_full_group(group, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(group))
        },
      ),
    )
    .route(
      "/:id/delete",
      delete(
        |Extension(state): StateExtension,
         Extension(user): RequestUserExtension,
         Path(group_id): Path<GroupId>| async move {
          let group = state
            .delete_group(&group_id.id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(group))
        },
      ),
    )
    .route(
      "/update",
      patch(
        |Extension(state): StateExtension,
         Extension(user): RequestUserExtension,
         Json(group): Json<Group>| async move {
          let group = state
            .update_group(group, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(group))
        },
      ),
    )
}

impl State {
  async fn list_groups(
    &self,
    user: &RequestUser,
    query: impl Into<Option<Document>>,
  ) -> anyhow::Result<Vec<Group>> {
    let groups: Vec<Group> = self
      .db
      .groups
      .get_some(query, None)
      .await
      .context("failed at get all groups query")?
      .into_iter()
      .filter(|s| {
        if user.is_admin {
          true
        } else {
          let permissions = s.get_user_permissions(&user.id);
          permissions != PermissionLevel::None
        }
      })
      .collect();
    // groups.sort_by(|a, b| a.name.to_lowercase().cmp(&b.name.to_lowercase()));
    Ok(groups)
  }
}
@@ -1,236 +0,0 @@
use anyhow::{anyhow, Context};
use axum::{
  body::Body,
  extract::Path,
  http::{Request, StatusCode},
  middleware,
  routing::{get, post},
  Extension, Json, Router,
};
use futures_util::Future;
use helpers::handle_anyhow_error;
use mungos::mongodb::bson::doc;
use serde::Deserialize;
use types::{PermissionLevel, UpdateTarget, User};
use typeshare::typeshare;

use crate::{
  auth::{auth_request, JwtExtension, RequestUser, RequestUserExtension},
  response,
  state::{State, StateExtension},
  ResponseResult,
};

mod build;
mod command;
mod deployment;
mod github_listener;
mod group;
mod permissions;
mod procedure;
mod secret;
mod server;
mod update;

#[typeshare]
#[derive(Deserialize)]
struct UpdateDescriptionBody {
  target: UpdateTarget,
  description: String,
}

#[derive(Deserialize)]
struct UserId {
  id: String,
}

pub fn router() -> Router {
  Router::new()
    .route(
      "/title",
      get(|state: StateExtension| async move { state.config.title.clone() }),
    )
    .route("/user", get(get_request_user))
    .nest("/listener", github_listener::router())
    .nest(
      "/",
      Router::new()
        .route("/user/:id", get(get_user_at_id))
        .route(
          "/username/:id",
          get(|state: StateExtension, Path(UserId { id })| async move {
            let user = state
              .db
              .get_user(&id)
              .await
              .context("failed to find user at id")
              .map_err(handle_anyhow_error)?;
            response!(Json(user.username))
          }),
        )
        .route(
          "/github_webhook_base_url",
          get(|state: StateExtension| async move {
            state
              .config
              .github_webhook_base_url
              .as_ref()
              .unwrap_or(&state.config.host)
              .to_string()
          }),
        )
        .route(
          "/update_description",
          post(
            |state: StateExtension,
             user: RequestUserExtension,
             body: Json<UpdateDescriptionBody>| async move {
              state
                .update_description(&body.target, &body.description, &user)
                .await
                .map_err(handle_anyhow_error)
            },
          ),
        )
        .route("/users", get(get_users))
        .nest("/build", build::router())
        .nest("/deployment", deployment::router())
        .nest("/server", server::router())
        .nest("/command", command::router())
        .nest("/procedure", procedure::router())
        .nest("/group", group::router())
        .nest("/update", update::router())
        .nest("/permissions", permissions::router())
        .nest("/secret", secret::router())
        .layer(middleware::from_fn(auth_request)),
    )
}

async fn get_request_user(
  Extension(jwt): JwtExtension,
  req: Request<Body>,
) -> ResponseResult<Json<User>> {
  let mut user = jwt.authenticate(&req).await.map_err(handle_anyhow_error)?;
  user.password = None;
  for secret in &mut user.secrets {
    secret.hash = String::new();
  }
  Ok(Json(user))
}

async fn get_users(
  state: StateExtension,
  user: RequestUserExtension,
) -> ResponseResult<Json<Vec<User>>> {
  if user.is_admin {
    let users = state
      .db
      .users
      .get_some(None, None)
      .await
      .context("failed to get users from db")
      .map_err(handle_anyhow_error)?
      .into_iter()
      .map(|u| User {
        password: None,
        secrets: vec![],
        ..u
      })
      .collect::<Vec<_>>();
    Ok(Json(users))
  } else {
    Err((StatusCode::UNAUTHORIZED, "user is not admin".to_string()))
  }
}

async fn get_user_at_id(
  state: StateExtension,
  Path(UserId { id }): Path<UserId>,
  user: RequestUserExtension,
) -> ResponseResult<Json<User>> {
  if user.is_admin {
    let mut user = state
      .db
      .users
      .find_one_by_id(&id)
      .await
      .context("failed at query to get user from mongo")
      .map_err(handle_anyhow_error)?
      .ok_or(anyhow!("did not find user with id {id}"))
      .map_err(handle_anyhow_error)?;
    user.password = None;
    for secret in &mut user.secrets {
      secret.hash = String::new();
    }
    Ok(Json(user))
  } else {
    Err((StatusCode::UNAUTHORIZED, "user is not admin".to_string()))
  }
}

// Requested actions are spawned onto their own task so they are not dropped
// mid-action when the user disconnects prematurely.
pub async fn spawn_request_action<A>(action: A) -> ResponseResult<A::Output>
where
  A: Future + Send + 'static,
  A::Output: Send + 'static,
{
  let res = tokio::spawn(action)
    .await
    .context("failure at action thread spawn")
    .map_err(handle_anyhow_error)?;
  Ok(res)
}
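
// tokio::spawn moves the action onto the runtime as an independent task, so it
// keeps running even if the handler future awaiting the JoinHandle is dropped
// when the client disconnects; without it, axum would drop the in-flight
// handler on disconnect and a half-finished build or deploy would be cancelled
// at its next await point. At call sites the first `?` of `.await??` covers
// the spawn/join failure, the second the action's own Result.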

impl State {
  pub async fn update_description(
    &self,
    target: &UpdateTarget,
    description: &str,
    user: &RequestUser,
  ) -> anyhow::Result<()> {
    match target {
      UpdateTarget::Build(id) => {
        self.get_build_check_permissions(id, user, PermissionLevel::Update)
          .await?;
        self.db
          .builds
          .update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
          .await?;
      }
      UpdateTarget::Deployment(id) => {
        self.get_deployment_check_permissions(id, user, PermissionLevel::Update)
          .await?;
        self.db
          .deployments
          .update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
          .await?;
      }
      UpdateTarget::Server(id) => {
        self.get_server_check_permissions(id, user, PermissionLevel::Update)
          .await?;
        self.db
          .servers
          .update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
          .await?;
      }
      UpdateTarget::Group(id) => {
        self.get_group_check_permissions(id, user, PermissionLevel::Update)
          .await?;
        self.db
          .groups
          .update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
          .await?;
      }
      UpdateTarget::Procedure(id) => {
        self.get_procedure_check_permissions(id, user, PermissionLevel::Update)
          .await?;
        self.db
          .procedures
          .update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
          .await?;
      }
      _ => return Err(anyhow!("invalid target: {target:?}")),
    }
    Ok(())
  }
}
@@ -1,410 +0,0 @@
use anyhow::{anyhow, Context};
use axum::{routing::post, Extension, Json, Router};
use helpers::handle_anyhow_error;
use mungos::mongodb::bson::{doc, Document};
use serde::{Deserialize, Serialize};
use types::{
  monitor_timestamp, Build, Deployment, Group, Log, Operation, PermissionLevel,
  PermissionsTarget, Procedure, Server, Update, UpdateStatus, UpdateTarget,
};
use typeshare::typeshare;

use crate::{auth::RequestUserExtension, response, state::StateExtension};

#[typeshare]
#[derive(Serialize, Deserialize)]
struct PermissionsUpdateBody {
  user_id: String,
  permission: PermissionLevel,
  target_type: PermissionsTarget,
  target_id: String,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
struct ModifyUserEnabledBody {
  user_id: String,
  enabled: bool,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
struct ModifyUserCreateServerBody {
  user_id: String,
  create_server_permissions: bool,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
struct ModifyUserCreateBuildBody {
  user_id: String,
  create_build_permissions: bool,
}

pub fn router() -> Router {
  Router::new()
    .route(
      "/update",
      post(|state, user, update| async {
        let update = update_permissions(state, user, update)
          .await
          .map_err(handle_anyhow_error)?;
        response!(Json(update))
      }),
    )
    .route(
      "/modify_enabled",
      post(|state, user, body| async {
        let update = modify_user_enabled(state, user, body)
          .await
          .map_err(handle_anyhow_error)?;
        response!(Json(update))
      }),
    )
    .route(
      "/modify_create_server",
      post(|state, user, body| async {
        let update = modify_user_create_server_permissions(state, user, body)
          .await
          .map_err(handle_anyhow_error)?;
        response!(Json(update))
      }),
    )
    .route(
      "/modify_create_build",
      post(|state, user, body| async {
        let update = modify_user_create_build_permissions(state, user, body)
          .await
          .map_err(handle_anyhow_error)?;
        response!(Json(update))
      }),
    )
}

async fn update_permissions(
  Extension(state): StateExtension,
  Extension(req_user): RequestUserExtension,
  Json(permission_update): Json<PermissionsUpdateBody>,
) -> anyhow::Result<Update> {
  if !req_user.is_admin {
    return Err(anyhow!(
      "user not authorized for this action (is not admin)"
    ));
  }
  let target_user = state
    .db
    .users
    .find_one_by_id(&permission_update.user_id)
    .await
    .context("failed at find target user query")?
    .ok_or(anyhow!(
      "failed to find a user with id {}",
      permission_update.user_id
    ))?;
  if !target_user.enabled {
    return Err(anyhow!("target user not enabled"));
  }
  let mut update = Update {
    operation: Operation::ModifyUserPermissions,
    start_ts: monitor_timestamp(),
    success: true,
    operator: req_user.id.clone(),
    status: UpdateStatus::Complete,
    ..Default::default()
  };
  let log_text = match permission_update.target_type {
    PermissionsTarget::Server => {
      let server = state
        .db
        .servers
        .find_one_by_id(&permission_update.target_id)
        .await
        .context("failed at find server query")?
        .ok_or(anyhow!(
          "failed to find a server with id {}",
          permission_update.target_id
        ))?;
      state
        .db
        .servers
        .update_one::<Server>(
          &permission_update.target_id,
          mungos::Update::Set(doc! {
            format!("permissions.{}", permission_update.user_id): permission_update.permission.to_string()
          }),
        )
        .await?;
      update.target = UpdateTarget::Server(server.id);
      format!(
        "user {} given {} permissions on server {}",
        target_user.username, permission_update.permission, server.name
      )
    }
    PermissionsTarget::Deployment => {
      let deployment = state
        .db
        .deployments
        .find_one_by_id(&permission_update.target_id)
        .await
        .context("failed at find deployment query")?
        .ok_or(anyhow!(
          "failed to find a deployment with id {}",
          permission_update.target_id
        ))?;
      state
        .db
        .deployments
        .update_one::<Deployment>(
          &permission_update.target_id,
          mungos::Update::Set(doc! {
            format!("permissions.{}", permission_update.user_id): permission_update.permission.to_string()
          }),
        )
        .await?;
      update.target = UpdateTarget::Deployment(deployment.id);
      format!(
        "user {} (id: {}) given {} permissions on deployment {}",
        target_user.username, target_user.id, permission_update.permission, deployment.name
      )
    }
    PermissionsTarget::Build => {
      let build = state
        .db
        .builds
        .find_one_by_id(&permission_update.target_id)
        .await
        .context("failed at find build query")?
        .ok_or(anyhow!(
          "failed to find a build with id {}",
          permission_update.target_id
        ))?;
      state
        .db
        .builds
        .update_one::<Build>(
          &permission_update.target_id,
          mungos::Update::Set(doc! {
            format!("permissions.{}", permission_update.user_id): permission_update.permission.to_string()
          }),
        )
        .await?;
      update.target = UpdateTarget::Build(build.id);
      format!(
        "user {} given {} permissions on build {}",
        target_user.username, permission_update.permission, build.name
      )
    }
    PermissionsTarget::Procedure => {
      let procedure = state
        .db
        .procedures
        .find_one_by_id(&permission_update.target_id)
        .await
        .context("failed at find procedure query")?
        .ok_or(anyhow!(
          "failed to find a procedure with id {}",
          permission_update.target_id
        ))?;
      state
        .db
        .procedures
        .update_one::<Procedure>(
          &permission_update.target_id,
          mungos::Update::Set(doc! {
            format!("permissions.{}", permission_update.user_id): permission_update.permission.to_string()
          }),
        )
        .await?;
      update.target = UpdateTarget::Procedure(procedure.id);
      format!(
        "user {} given {} permissions on procedure {}",
        target_user.username, permission_update.permission, procedure.name
      )
    }
    PermissionsTarget::Group => {
      let group = state
        .db
        .groups
        .find_one_by_id(&permission_update.target_id)
        .await
        .context("failed at find group query")?
        .ok_or(anyhow!(
          "failed to find a group with id {}",
          permission_update.target_id
        ))?;
      state
        .db
        .groups
        .update_one::<Group>(
          &permission_update.target_id,
          mungos::Update::Set(doc! {
            format!("permissions.{}", permission_update.user_id): permission_update.permission.to_string()
          }),
        )
        .await?;
      update.target = UpdateTarget::Group(group.id);
      format!(
        "user {} given {} permissions on group {}",
        target_user.username, permission_update.permission, group.name
      )
    }
  };
  update
    .logs
    .push(Log::simple("modify permissions", log_text));
  update.end_ts = Some(monitor_timestamp());
  update.id = state.add_update(update.clone()).await?;
  Ok(update)
}

async fn modify_user_enabled(
  Extension(state): StateExtension,
  Extension(req_user): RequestUserExtension,
  Json(ModifyUserEnabledBody { user_id, enabled }): Json<ModifyUserEnabledBody>,
) -> anyhow::Result<Update> {
  if !req_user.is_admin {
    return Err(anyhow!(
      "user does not have permissions for this action (not admin)"
    ));
  }
  let user = state
    .db
    .users
    .find_one_by_id(&user_id)
    .await
    .context("failed at mongo query to find target user")?
    .ok_or(anyhow!("did not find any user with user_id {user_id}"))?;
  state
    .db
    .users
    .update_one::<Document>(&user_id, mungos::Update::Set(doc! { "enabled": enabled }))
    .await?;
  let update_type = if enabled { "enabled" } else { "disabled" };
  let ts = monitor_timestamp();
  let mut update = Update {
    target: UpdateTarget::System,
    operation: Operation::ModifyUserEnabled,
    logs: vec![Log::simple(
      "modify user enabled",
      format!("{update_type} {} (id: {})", user.username, user.id),
    )],
    start_ts: ts.clone(),
    end_ts: Some(ts),
    status: UpdateStatus::Complete,
    success: true,
    operator: req_user.id.clone(),
    ..Default::default()
  };
  update.id = state.add_update(update.clone()).await?;
  Ok(update)
}

async fn modify_user_create_server_permissions(
  Extension(state): StateExtension,
  Extension(req_user): RequestUserExtension,
  Json(ModifyUserCreateServerBody {
    user_id,
    create_server_permissions,
  }): Json<ModifyUserCreateServerBody>,
) -> anyhow::Result<Update> {
  if !req_user.is_admin {
    return Err(anyhow!(
      "user does not have permissions for this action (not admin)"
    ));
  }
  let target_user = state
    .db
    .users
    .find_one_by_id(&user_id)
    .await
    .context("failed at mongo query to find target user")?
    .ok_or(anyhow!("did not find any user with user_id {user_id}"))?;
  state
    .db
    .users
    .update_one::<Document>(
      &user_id,
      mungos::Update::Set(doc! { "create_server_permissions": create_server_permissions }),
    )
    .await?;
  let update_type = if create_server_permissions {
    "enabled"
  } else {
    "disabled"
  };
  let ts = monitor_timestamp();
  let mut update = Update {
    target: UpdateTarget::System,
    operation: Operation::ModifyUserCreateServerPermissions,
    logs: vec![Log::simple(
      "modify user create server permissions",
      format!(
        "{update_type} create server permissions for {} (id: {})",
        target_user.username, target_user.id
      ),
    )],
    start_ts: ts.clone(),
    end_ts: Some(ts),
    status: UpdateStatus::Complete,
    success: true,
    operator: req_user.id.clone(),
    ..Default::default()
  };
  update.id = state.add_update(update.clone()).await?;
  Ok(update)
}

async fn modify_user_create_build_permissions(
  Extension(state): StateExtension,
  Extension(req_user): RequestUserExtension,
  Json(ModifyUserCreateBuildBody {
    user_id,
    create_build_permissions,
  }): Json<ModifyUserCreateBuildBody>,
) -> anyhow::Result<Update> {
  if !req_user.is_admin {
    return Err(anyhow!(
      "user does not have permissions for this action (not admin)"
    ));
  }
  let target_user = state
    .db
    .users
    .find_one_by_id(&user_id)
    .await
    .context("failed at mongo query to find target user")?
    .ok_or(anyhow!("did not find any user with user_id {user_id}"))?;
  state
    .db
    .users
    .update_one::<Document>(
      &user_id,
      mungos::Update::Set(doc! { "create_build_permissions": create_build_permissions }),
    )
    .await?;
  let update_type = if create_build_permissions {
    "enabled"
  } else {
    "disabled"
  };
  let ts = monitor_timestamp();
  let mut update = Update {
    target: UpdateTarget::System,
    operation: Operation::ModifyUserCreateBuildPermissions,
    logs: vec![Log::simple(
      "modify user create build permissions",
      format!(
        "{update_type} create build permissions for {} (id: {})",
        target_user.username, target_user.id
      ),
    )],
    start_ts: ts.clone(),
    end_ts: Some(ts),
    status: UpdateStatus::Complete,
    success: true,
    operator: req_user.id.clone(),
    ..Default::default()
  };
  update.id = state.add_update(update.clone()).await?;
  Ok(update)
}
@@ -1,166 +0,0 @@
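// Procedure API: CRUD routes plus /:id/run, with a permission check in
// every handler, and the permission-filtered list query on State.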
use anyhow::Context;
use axum::{
  extract::{Path, Query},
  routing::{delete, get, patch, post},
  Extension, Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::mongodb::bson::Document;
use serde::{Deserialize, Serialize};
use types::{traits::Permissioned, PermissionLevel, Procedure};
use typeshare::typeshare;

use crate::{
  auth::{RequestUser, RequestUserExtension},
  response,
  state::{State, StateExtension},
};

use super::spawn_request_action;

#[derive(Serialize, Deserialize)]
pub struct ProcedureId {
  id: String,
}

#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CreateProcedureBody {
  name: String,
}

pub fn router() -> Router {
  Router::new()
    .route(
      "/:id",
      get(
        |Extension(state): StateExtension,
         Extension(user): RequestUserExtension,
         Path(procedure_id): Path<ProcedureId>| async move {
          let procedure = state
            .get_procedure_check_permissions(
              &procedure_id.id,
              &user,
              PermissionLevel::Read,
            )
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(procedure))
        },
      ),
    )
    .route(
      "/list",
      get(
        |Extension(state): StateExtension,
         Extension(user): RequestUserExtension,
         Query(query): Query<Document>| async move {
          let procedures = state
            .list_procedures(&user, query)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(procedures))
        },
      ),
    )
    .route(
      "/create",
      post(
        |Extension(state): StateExtension,
         Extension(user): RequestUserExtension,
         Json(procedure): Json<CreateProcedureBody>| async move {
          let procedure = state
            .create_procedure(&procedure.name, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(procedure))
        },
      ),
    )
    .route(
      "/create_full",
      post(
        |Extension(state): StateExtension,
         Extension(user): RequestUserExtension,
         Json(procedure): Json<Procedure>| async move {
          let procedure = state
            .create_full_procedure(procedure, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(procedure))
        },
      ),
    )
    .route(
      "/:id/delete",
      delete(
        |Extension(state): StateExtension,
         Extension(user): RequestUserExtension,
         Path(procedure_id): Path<ProcedureId>| async move {
          let procedure = state
            .delete_procedure(&procedure_id.id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(procedure))
        },
      ),
    )
    .route(
      "/update",
      patch(
        |Extension(state): StateExtension,
         Extension(user): RequestUserExtension,
         Json(procedure): Json<Procedure>| async move {
          let procedure = state
            .update_procedure(procedure, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(procedure))
        },
      ),
    )
    .route(
      "/:id/run",
      post(
        |Extension(state): StateExtension,
         Extension(user): RequestUserExtension,
         Path(procedure_id): Path<ProcedureId>| async move {
          let update = spawn_request_action(async move {
            state
              .run_procedure(&procedure_id.id, &user)
              .await
              .map_err(handle_anyhow_error)
          })
          .await??;
          response!(Json(update))
        },
      ),
    )
}

impl State {
  async fn list_procedures(
    &self,
    user: &RequestUser,
    query: impl Into<Option<Document>>,
  ) -> anyhow::Result<Vec<Procedure>> {
    let procedures: Vec<Procedure> = self
      .db
      .procedures
      .get_some(query, None)
      .await
      .context("failed at get all procedures query")?
      .into_iter()
      .filter(|s| {
        if user.is_admin {
          true
        } else {
          let permissions = s.get_user_permissions(&user.id);
          permissions != PermissionLevel::None
        }
      })
      .collect();
    // procedures.sort_by(|a, b| a.name.to_lowercase().cmp(&b.name.to_lowercase()));
    Ok(procedures)
  }
}
@@ -1,116 +0,0 @@
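// API-secret management: secrets are generated server side, stored only
// as bcrypt hashes on the user document, and the plaintext is returned
// exactly once from the create endpoint.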
use anyhow::{anyhow, Context};
use axum::{
  extract::Path,
  routing::{delete, post},
  Extension, Json, Router,
};
use helpers::{generate_secret, handle_anyhow_error};
use mungos::{
  mongodb::bson::{doc, to_bson, Document},
  Update,
};
use serde::{Deserialize, Serialize};
use types::{monitor_timestamp, ApiSecret};
use typeshare::typeshare;

use crate::{auth::RequestUserExtension, state::StateExtension};

const SECRET_LENGTH: usize = 40;
const BCRYPT_COST: u32 = 10;

#[typeshare]
#[derive(Serialize, Deserialize)]
struct CreateSecretBody {
  name: String,
  expires: Option<String>,
}

#[derive(Serialize, Deserialize)]
struct DeleteSecretPath {
  name: String,
}

pub fn router() -> Router {
  Router::new()
    .route(
      "/create",
      post(|state, user, secret| async {
        create(state, user, secret)
          .await
          .map_err(handle_anyhow_error)
      }),
    )
    .route(
      "/delete/:name",
      delete(|state, user, secret_id| async {
        delete_one(state, user, secret_id)
          .await
          .map_err(handle_anyhow_error)
      }),
    )
}

impl Into<ApiSecret> for CreateSecretBody {
  fn into(self) -> ApiSecret {
    ApiSecret {
      name: self.name,
      expires: self.expires,
      created_at: monitor_timestamp(),
      ..Default::default()
    }
  }
}

async fn create(
  Extension(state): StateExtension,
  Extension(req_user): RequestUserExtension,
  Json(secret): Json<CreateSecretBody>,
) -> anyhow::Result<String> {
  let user = state.db.get_user(&req_user.id).await?;
  for s in &user.secrets {
    if s.name == secret.name {
      return Err(anyhow!("secret with name {} already exists", secret.name));
    }
  }
  let mut secret: ApiSecret = secret.into();
  let secret_str = generate_secret(SECRET_LENGTH);
  secret.hash =
    bcrypt::hash(&secret_str, BCRYPT_COST).context("failed at hashing secret string")?;
  state
    .db
    .users
    .update_one::<Document>(
      &req_user.id,
      Update::Custom(doc! {
        "$push": {
          "secrets": to_bson(&secret).context("failed at converting secret to bson")?
        }
      }),
    )
    .await
    .context("failed at mongo update query")?;
  Ok(secret_str)
}

async fn delete_one(
  Extension(state): StateExtension,
  Extension(user): RequestUserExtension,
  Path(DeleteSecretPath { name }): Path<DeleteSecretPath>,
) -> anyhow::Result<()> {
  state
    .db
    .users
    .update_one::<Document>(
      &user.id,
      Update::Custom(doc! {
        "$pull": {
          "secrets": {
            "name": name
          }
        }
      }),
    )
    .await
    .context("failed at mongo update query")?;
  Ok(())
}
@@ -1,686 +0,0 @@
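// Server API: CRUD for servers plus pass-through endpoints to the
// periphery agent (stats, networks, images, containers, prunes, accounts,
// available secrets, action state) and a stats websocket.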
use anyhow::{anyhow, Context};
use async_timing_util::{get_timelength_in_ms, unix_timestamp_ms};
use axum::{
  extract::{ws::Message as AxumMessage, Path, Query, WebSocketUpgrade},
  response::IntoResponse,
  routing::{delete, get, patch, post},
  Json, Router,
};
use futures_util::{future::join_all, SinkExt, StreamExt};
use helpers::handle_anyhow_error;
use mungos::mongodb::{
  bson::{doc, Document},
  options::FindOptions,
};
use serde::Deserialize;
use tokio::select;
use tokio_tungstenite::tungstenite::Message;
use tokio_util::sync::CancellationToken;
use types::{
  traits::Permissioned, BasicContainerInfo, HistoricalStatsQuery, ImageSummary, Network,
  PermissionLevel, Server, ServerActionState, ServerStatus, ServerWithStatus, SystemInformation,
  SystemStats, SystemStatsQuery, SystemStatsRecord,
};
use typeshare::typeshare;

const MAX_HISTORICAL_STATS_LIMIT: i64 = 500;

use crate::{
  auth::{RequestUser, RequestUserExtension},
  response,
  state::{State, StateExtension},
};

use super::spawn_request_action;

#[derive(Deserialize)]
struct ServerId {
  id: String,
}

#[derive(Deserialize)]
struct Ts {
  ts: i64,
}

#[typeshare]
#[derive(Deserialize)]
pub struct CreateServerBody {
  name: String,
  address: String,
}

pub fn router() -> Router {
  Router::new()
    .route(
      "/:id",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(server_id): Path<ServerId>| async move {
          let server = state
            .get_server(&server_id.id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(server))
        },
      ),
    )
    .route(
      "/list",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Query(query): Query<Document>| async move {
          let servers = state
            .list_servers(&user, query)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(servers))
        },
      ),
    )
    .route(
      "/create",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Json(server): Json<CreateServerBody>| async move {
          let server = state
            .create_server(&server.name, server.address, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(server))
        },
      ),
    )
    .route(
      "/create_full",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Json(server): Json<Server>| async move {
          let server = state
            .create_full_server(server, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(server))
        },
      ),
    )
    .route(
      "/:id/delete",
      delete(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(server): Path<ServerId>| async move {
          let server = state
            .delete_server(&server.id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(server))
        },
      ),
    )
    .route(
      "/update",
      patch(
        |state: StateExtension,
         user: RequestUserExtension,
         Json(server): Json<Server>| async move {
          let server = state
            .update_server(server, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(server))
        },
      ),
    )
    .route(
      "/:id/version",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id })| async move {
          let stats = state
            .get_server_version(&id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(stats))
        },
      ),
    )
    .route(
      "/:id/system_information",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id })| async move {
          let stats = state
            .get_server_system_info(&id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(stats))
        },
      ),
    )
    .route(
      "/:id/stats",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id }),
         query: Query<SystemStatsQuery>| async move {
          let stats = state
            .get_server_stats(&id, &user, &query)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(stats))
        },
      ),
    )
    .route(
      "/:id/stats/history",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id }),
         query: Query<HistoricalStatsQuery>| async move {
          let stats = state
            .get_historical_stats(&id, &user, &query)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(stats))
        },
      ),
    )
    .route(
      "/:id/stats/at_ts",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id }),
         Query(Ts { ts })| async move {
          let stats = state
            .get_stats_at_ts(&id, &user, ts)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(stats))
        },
      ),
    )
    .route(
      "/:id/stats/ws",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id }),
         Query(query): Query<SystemStatsQuery>,
         ws: WebSocketUpgrade| async move {
          let connection = state
            .subscribe_to_stats_ws(&id, &user, &query, ws)
            .await
            .map_err(handle_anyhow_error)?;
          response!(connection)
        },
      ),
    )
    .route(
      "/:id/networks",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id })| async move {
          let stats = state
            .get_networks(&id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(stats))
        },
      ),
    )
    .route(
      "/:id/networks/prune",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id })| async move {
          let stats = spawn_request_action(async move {
            state
              .prune_networks(&id, &user)
              .await
              .map_err(handle_anyhow_error)
          }).await??;
          response!(Json(stats))
        },
      ),
    )
    .route(
      "/:id/images",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id })| async move {
          let stats = state
            .get_images(&id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(stats))
        },
      ),
    )
    .route(
      "/:id/images/prune",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id })| async move {
          let stats = spawn_request_action(async move {
            state
              .prune_images(&id, &user)
              .await
              .map_err(handle_anyhow_error)
          }).await??;
          response!(Json(stats))
        },
      ),
    )
    .route(
      "/:id/containers",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id })| async move {
          let stats = state
            .get_containers(&id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(stats))
        },
      ),
    )
    .route(
      "/:id/containers/prune",
      post(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id })| async move {
          let stats = spawn_request_action(async move {
            state
              .prune_containers(&id, &user)
              .await
              .map_err(handle_anyhow_error)
          }).await??;
          response!(Json(stats))
        },
      ),
    )
    .route(
      "/:id/github_accounts",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id })| async move {
          let github_accounts = state
            .get_github_accounts(&id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(github_accounts))
        },
      ),
    )
    .route(
      "/:id/docker_accounts",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id })| async move {
          let docker_accounts = state
            .get_docker_accounts(&id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(docker_accounts))
        },
      ),
    )
    .route(
      "/:id/secrets",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id })| async move {
          let vars = state
            .get_available_secrets(&id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(vars))
        },
      ),
    )
    .route(
      "/:id/action_state",
      get(
        |state: StateExtension,
         user: RequestUserExtension,
         Path(ServerId { id })| async move {
          let action_state = state
            .get_server_action_states(id, &user)
            .await
            .map_err(handle_anyhow_error)?;
          response!(Json(action_state))
        },
      ),
    )
}
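
// State methods backing the routes above. Each one re-checks read
// permission on the server before calling out to the periphery agent.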
impl State {
  pub async fn get_server(
    &self,
    id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<ServerWithStatus> {
    let server = self
      .get_server_check_permissions(id, user, PermissionLevel::Read)
      .await?;
    let status = if server.enabled {
      let res = self.periphery.health_check(&server).await;
      match res {
        Ok(_) => ServerStatus::Ok,
        Err(_) => ServerStatus::NotOk,
      }
    } else {
      ServerStatus::Disabled
    };
    Ok(ServerWithStatus { server, status })
  }

  async fn list_servers(
    &self,
    user: &RequestUser,
    query: impl Into<Option<Document>>,
  ) -> anyhow::Result<Vec<ServerWithStatus>> {
    let futures = self
      .db
      .servers
      .get_some(query, None)
      .await
      .context("failed at get all servers query")?
      .into_iter()
      .filter(|s| {
        if user.is_admin {
          true
        } else {
          let permissions = s.get_user_permissions(&user.id);
          permissions != PermissionLevel::None
        }
      })
      .map(|server| async {
        let status = if server.enabled {
          let res = self.periphery.health_check(&server).await;
          match res {
            Ok(_) => ServerStatus::Ok,
            Err(_) => ServerStatus::NotOk,
          }
        } else {
          ServerStatus::Disabled
        };

        ServerWithStatus { server, status }
      });
    Ok(join_all(futures).await)
  }

  async fn get_server_version(
    &self,
    server_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<String> {
    let server = self
      .get_server_check_permissions(server_id, user, PermissionLevel::Read)
      .await?;
    let version = self.periphery.get_version(&server).await.context(format!(
      "failed to get version from server {}",
      server.name
    ))?;
    Ok(version)
  }

  async fn get_server_system_info(
    &self,
    server_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<SystemInformation> {
    let server = self
      .get_server_check_permissions(server_id, user, PermissionLevel::Read)
      .await?;
    let stats = self
      .periphery
      .get_system_information(&server)
      .await
      .context(format!(
        "failed to get system information from server {}",
        server.name
      ))?;
    Ok(stats)
  }

  async fn get_server_stats(
    &self,
    server_id: &str,
    user: &RequestUser,
    query: &SystemStatsQuery,
  ) -> anyhow::Result<SystemStats> {
    let server = self
      .get_server_check_permissions(server_id, user, PermissionLevel::Read)
      .await?;
    let stats = self
      .periphery
      .get_system_stats(&server, query)
      .await
      .context(format!("failed to get stats from server {}", server.name))?;
    Ok(stats)
  }

  async fn get_historical_stats(
    &self,
    server_id: &str,
    user: &RequestUser,
    query: &HistoricalStatsQuery,
  ) -> anyhow::Result<Vec<SystemStatsRecord>> {
    self
      .get_server_check_permissions(server_id, user, PermissionLevel::Read)
      .await?;

    let mut projection = doc! { "processes": 0, "disk.disks": 0 };
    if !query.networks {
      projection.insert("networks", 0);
    }
    if !query.components {
      projection.insert("components", 0);
    }
    let limit = if query.limit as i64 > MAX_HISTORICAL_STATS_LIMIT {
      MAX_HISTORICAL_STATS_LIMIT
    } else {
      query.limit as i64
    };
    let interval = get_timelength_in_ms(query.interval.to_string().parse().unwrap()) as i64;
    // build the list of aligned timestamps for the requested page
    let mut ts_vec = Vec::<i64>::new();
    let curr_ts = unix_timestamp_ms() as i64;
    let mut curr_ts = curr_ts - curr_ts % interval - interval * limit * query.page as i64;
    for _ in 0..limit {
      ts_vec.push(curr_ts);
      curr_ts -= interval;
    }
    self
      .db
      .stats
      .get_some(
        doc! {
          "server_id": server_id,
          "ts": { "$in": ts_vec }
        },
        FindOptions::builder()
          .sort(doc! { "ts": 1 })
          .projection(projection)
          .build(),
      )
      .await
      .context("failed at mongo query to get stats")
  }

  async fn get_stats_at_ts(
    &self,
    server_id: &str,
    user: &RequestUser,
    ts: i64,
  ) -> anyhow::Result<SystemStatsRecord> {
    self
      .get_server_check_permissions(server_id, user, PermissionLevel::Read)
      .await?;
    self
      .db
      .stats
      .find_one(doc! { "server_id": server_id, "ts": ts }, None)
      .await
      .context("failed at mongo query to get full stat entry")?
      .ok_or(anyhow!("did not find entry for server at time"))
  }

  async fn subscribe_to_stats_ws(
    &self,
    server_id: &str,
    user: &RequestUser,
    query: &SystemStatsQuery,
    ws: WebSocketUpgrade,
  ) -> anyhow::Result<impl IntoResponse> {
    let server = self
      .get_server_check_permissions(server_id, user, PermissionLevel::Read)
      .await?;
    let mut stats_reciever = self.periphery.subscribe_to_stats_ws(&server, query).await?;
    let upgrade = ws.on_upgrade(|socket| async move {
      let (mut ws_sender, mut ws_recv) = socket.split();
      let cancel = CancellationToken::new();
      let cancel_clone = cancel.clone();
      // forward stats messages from the periphery ws to the client ws
      tokio::spawn(async move {
        loop {
          let stats = select! {
            _ = cancel_clone.cancelled() => break,
            stats = stats_reciever.next() => stats
          };
          if let Some(Ok(Message::Text(msg))) = stats {
            let _ = ws_sender.send(AxumMessage::Text(msg)).await;
          }
        }
      });
      // cancel the forwarding task once the client closes or errors
      while let Some(msg) = ws_recv.next().await {
        match msg {
          Ok(msg) => match msg {
            AxumMessage::Close(_) => {
              cancel.cancel();
              return;
            }
            _ => {}
          },
          Err(_) => {
            cancel.cancel();
            return;
          }
        }
      }
    });
    Ok(upgrade)
  }

  async fn get_networks(
    &self,
    server_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Vec<Network>> {
    let server = self
      .get_server_check_permissions(server_id, user, PermissionLevel::Read)
      .await?;
    let stats = self.periphery.network_list(&server).await.context(format!(
      "failed to get networks from server {}",
      server.name
    ))?;
    Ok(stats)
  }

  async fn get_images(
    &self,
    server_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Vec<ImageSummary>> {
    let server = self
      .get_server_check_permissions(server_id, user, PermissionLevel::Read)
      .await?;
    let images = self
      .periphery
      .image_list(&server)
      .await
      .context(format!("failed to get images from server {}", server.name))?;
    Ok(images)
  }

  async fn get_containers(
    &self,
    server_id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Vec<BasicContainerInfo>> {
    let server = self
      .get_server_check_permissions(server_id, user, PermissionLevel::Read)
      .await?;
    let containers = self
      .periphery
      .container_list(&server)
      .await
      .context(format!(
        "failed to get containers from server {}",
        server.name
      ))?;
    Ok(containers)
  }

  async fn get_github_accounts(
    &self,
    id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Vec<String>> {
    let server = self
      .get_server_check_permissions(id, user, PermissionLevel::Read)
      .await?;
    let github_accounts = self.periphery.get_github_accounts(&server).await?;
    Ok(github_accounts)
  }

  async fn get_docker_accounts(
    &self,
    id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Vec<String>> {
    let server = self
      .get_server_check_permissions(id, user, PermissionLevel::Read)
      .await?;
    let docker_accounts = self.periphery.get_docker_accounts(&server).await?;
    Ok(docker_accounts)
  }

  async fn get_available_secrets(
    &self,
    id: &str,
    user: &RequestUser,
  ) -> anyhow::Result<Vec<String>> {
    let server = self
      .get_server_check_permissions(id, user, PermissionLevel::Read)
      .await?;
    let vars = self.periphery.get_available_secrets(&server).await?;
    Ok(vars)
  }

  async fn get_server_action_states(
    &self,
    id: String,
    user: &RequestUser,
  ) -> anyhow::Result<ServerActionState> {
    self
      .get_server_check_permissions(&id, &user, PermissionLevel::Read)
      .await?;
    let action_state = self.server_action_states.get_or_default(id).await;
    Ok(action_state)
  }
}
@@ -1,223 +0,0 @@
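// Update feed: a single /list endpoint with paging, optional target and
// operation filters, and per-user permission scoping of the results.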
use anyhow::{anyhow, Context};
use axum::{extract::Query, routing::get, Extension, Json, Router};
use helpers::handle_anyhow_error;
use mungos::mongodb::bson::{doc, to_bson};
use serde_json::Value;
use types::{PermissionLevel, Update, UpdateTarget};

use crate::{
  auth::{RequestUser, RequestUserExtension},
  helpers::parse_comma_seperated_list,
  response,
  state::{State, StateExtension},
};

const NUM_UPDATES_PER_PAGE: usize = 20;

pub fn router() -> Router {
  Router::new().route(
    "/list",
    get(
      |Extension(state): StateExtension,
       Extension(user): RequestUserExtension,
       Query(value): Query<Value>| async move {
        let offset = value
          .get("offset")
          .map(|v| v.as_str().unwrap_or("0").parse().unwrap_or(0))
          .unwrap_or(0);
        let target = serde_json::from_str::<UpdateTarget>(&value.to_string()).ok();
        let show_builds = value
          .get("show_builds")
          .map(|b| {
            b.as_str()
              .unwrap_or("false")
              .parse::<bool>()
              .unwrap_or_default()
          })
          .unwrap_or_default();
        let operations = value
          .get("operations")
          .map(|o| {
            let o = o.as_str().unwrap_or_default();
            if o.len() == 0 {
              return None;
            }
            parse_comma_seperated_list::<String>(o).ok()
          })
          .flatten();
        let updates = state
          .list_updates(target, offset, show_builds, operations, &user)
          .await
          .map_err(handle_anyhow_error)?;
        response!(Json(updates))
      },
    ),
  )
}

impl State {
  async fn permission_on_update_target(
    &self,
    update_target: &UpdateTarget,
    user: &RequestUser,
  ) -> anyhow::Result<()> {
    if user.is_admin {
      Ok(())
    } else {
      match update_target {
        UpdateTarget::System => {
          if user.is_admin {
            Ok(())
          } else {
            Err(anyhow!("user must be admin to see system updates"))
          }
        }
        UpdateTarget::Build(id) => self
          .get_build_check_permissions(id, user, PermissionLevel::Read)
          .await
          .map(|_| ()),
        UpdateTarget::Deployment(id) => self
          .get_deployment_check_permissions(id, user, PermissionLevel::Read)
          .await
          .map(|_| ()),
        UpdateTarget::Server(id) => self
          .get_server_check_permissions(id, user, PermissionLevel::Read)
          .await
          .map(|_| ()),
        UpdateTarget::Procedure(id) => self
          .get_procedure_check_permissions(id, user, PermissionLevel::Read)
          .await
          .map(|_| ()),
        UpdateTarget::Group(id) => self
          .get_group_check_permissions(id, user, PermissionLevel::Read)
          .await
          .map(|_| ()),
        UpdateTarget::Command(id) => self
          .get_command_check_permissions(id, user, PermissionLevel::Read)
          .await
          .map(|_| ()),
      }
    }
  }

  pub async fn list_updates(
    &self,
    target: Option<UpdateTarget>,
    offset: u64,
    show_builds: bool,
    operations: Option<Vec<String>>,
    user: &RequestUser,
  ) -> anyhow::Result<Vec<Update>> {
    let mut filter = match target {
      Some(target) => {
        if let (UpdateTarget::Deployment(id), true) = (&target, show_builds) {
          let deployment = self
            .get_deployment_check_permissions(id, user, PermissionLevel::Read)
            .await?;
          if let Some(build_id) = &deployment.build_id {
            let build = self
              .get_build_check_permissions(build_id, user, PermissionLevel::Read)
              .await;
            if let Ok(_) = build {
              doc! {
                "$or": [
                  {"target": to_bson(&target).unwrap()},
                  {"target": { "type": "Build", "id": build_id }, "operation": "build_build"}
                ],
              }
            } else {
              doc! {
                "target": to_bson(&target).unwrap()
              }
            }
          } else {
            doc! {
              "target": to_bson(&target).unwrap()
            }
          }
        } else {
          self.permission_on_update_target(&target, user).await?;
          doc! {
            "target": to_bson(&target).unwrap()
          }
        }
      }
      None => {
        if user.is_admin {
          doc! {}
        } else {
          let permissions_field = format!("permissions.{}", user.id);
          let target_filter = doc! {
            "$or": [
              { &permissions_field: "update" },
              { &permissions_field: "execute" },
              { &permissions_field: "read" },
            ]
          };
          let build_ids = self
            .db
            .builds
            .get_some(target_filter.clone(), None)
            .await
            .context("failed at query to get users builds")?
            .into_iter()
            .map(|e| e.id)
            .collect::<Vec<_>>();
          let deployment_ids = self
            .db
            .deployments
            .get_some(target_filter.clone(), None)
            .await
            .context("failed at query to get users deployments")?
            .into_iter()
            .map(|e| e.id)
            .collect::<Vec<_>>();
          let server_ids = self
            .db
            .servers
            .get_some(target_filter.clone(), None)
            .await
            .context("failed at query to get users servers")?
            .into_iter()
            .map(|e| e.id)
            .collect::<Vec<_>>();
          let procedure_ids = self
            .db
            .procedures
            .get_some(target_filter, None)
            .await
            .context("failed at query to get users procedures")?
            .into_iter()
            .map(|e| e.id)
            .collect::<Vec<_>>();
          let filter = doc! {
            "$or": [
              { "target.type": "Build", "target.id": { "$in": &build_ids } },
              { "target.type": "Deployment", "target.id": { "$in": &deployment_ids } },
              { "target.type": "Server", "target.id": { "$in": &server_ids } },
              { "target.type": "Procedure", "target.id": { "$in": &procedure_ids } }
            ]
          };
          filter
        }
      }
    };
    if let Some(operations) = operations {
      filter.insert("operation", doc! { "$in": operations });
    }
    let mut updates = self
      .db
      .updates
      .get_most_recent(
        "start_ts",
        NUM_UPDATES_PER_PAGE as i64,
        offset,
        filter,
        None,
      )
      .await
      .context("mongo get most recent updates query failed")?;
    updates.reverse();
    Ok(updates)
  }
}
@@ -1,101 +0,0 @@
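// GitHub OAuth flow: the callback exchanges the code for an access token,
// looks up (or creates) the user by github_id, then redirects with a
// short-lived exchange token rather than the raw jwt.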
use std::sync::Arc;

use anyhow::{anyhow, Context};
use axum::{extract::Query, response::Redirect, routing::get, Extension, Router};
use axum_oauth2::github::{GithubOauthClient, GithubOauthExtension};
use helpers::handle_anyhow_error;
use mungos::mongodb::bson::doc;
use serde::Deserialize;
use types::{monitor_timestamp, CoreConfig, User};

use crate::{response, state::StateExtension};

use super::JwtExtension;

pub fn router(config: &CoreConfig) -> Router {
  let client = GithubOauthClient::new(
    config.github_oauth.id.clone(),
    config.github_oauth.secret.clone(),
    format!("{}/auth/github/callback", config.host),
    &[],
    "monitor".to_string(),
  );
  Router::new()
    .route(
      "/login",
      get(|Extension(client): GithubOauthExtension| async move {
        Redirect::to(&client.get_login_redirect_url())
      }),
    )
    .route(
      "/callback",
      get(|client, jwt, state, query| async {
        let redirect = callback(client, jwt, state, query)
          .await
          .map_err(handle_anyhow_error)?;
        response!(redirect)
      }),
    )
    .layer(Extension(Arc::new(client)))
}

#[derive(Deserialize)]
struct CallbackQuery {
  state: String,
  code: String,
}

async fn callback(
  Extension(client): GithubOauthExtension,
  Extension(jwt_client): JwtExtension,
  Extension(state): StateExtension,
  Query(query): Query<CallbackQuery>,
) -> anyhow::Result<Redirect> {
  if !client.check_state(&query.state) {
    return Err(anyhow!("state mismatch"));
  }
  let token = client.get_access_token(&query.code).await?;
  let github_user = client.get_github_user(&token.access_token).await?;
  let github_id = github_user.id.to_string();
  let user = state
    .db
    .users
    .find_one(doc! { "github_id": &github_id }, None)
    .await
    .context("failed at find user query from mongo")?;
  let jwt = match user {
    Some(user) => jwt_client
      .generate(user.id)
      .context("failed to generate jwt")?,
    None => {
      let ts = monitor_timestamp();
      let no_users_exist = state.db.users.find_one(None, None).await?.is_none();
      let user = User {
        username: github_user.login,
        avatar: github_user.avatar_url.into(),
        github_id: github_id.into(),
        enabled: no_users_exist,
        admin: no_users_exist,
        create_server_permissions: no_users_exist,
        create_build_permissions: no_users_exist,
        created_at: ts.clone(),
        updated_at: ts,
        ..Default::default()
      };
      let user_id = state
        .db
        .users
        .create_one(user)
        .await
        .context("failed to create user on mongo")?;
      jwt_client
        .generate(user_id)
        .context("failed to generate jwt")?
    }
  };
  let exchange_token = jwt_client.create_exchange_token(jwt);
  Ok(Redirect::to(&format!(
    "{}?token={exchange_token}",
    state.config.host
  )))
}
@@ -1,124 +0,0 @@
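// Google OAuth flow: same shape as the GitHub flow, but with
// profile/email scopes, an id_token-based user lookup, and the username
// derived from the local part of the email address.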
use std::sync::Arc;

use anyhow::{anyhow, Context};
use axum::{extract::Query, response::Redirect, routing::get, Extension, Router};
use axum_oauth2::google::{GoogleOauthClient, GoogleOauthExtension};
use helpers::handle_anyhow_error;
use mungos::mongodb::bson::doc;
use serde::Deserialize;
use types::{monitor_timestamp, CoreConfig, User};

use crate::{response, state::StateExtension};

use super::JwtExtension;

pub fn router(config: &CoreConfig) -> Router {
  let client = GoogleOauthClient::new(
    config.google_oauth.id.clone(),
    config.google_oauth.secret.clone(),
    format!("{}/auth/google/callback", config.host),
    &[
      "https://www.googleapis.com/auth/userinfo.profile",
      "https://www.googleapis.com/auth/userinfo.email",
    ],
    "monitor".to_string(),
  );
  Router::new()
    .route(
      "/login",
      get(|Extension(client): GoogleOauthExtension| async move {
        Redirect::to(&client.get_login_redirect_url())
      }),
    )
    .route(
      "/callback",
      get(|client, jwt, state, query| async {
        let redirect = callback(client, jwt, state, query)
          .await
          .map_err(handle_anyhow_error)?;
        response!(redirect)
      }),
    )
    .layer(Extension(Arc::new(client)))
}

#[derive(Deserialize)]
struct CallbackQuery {
  state: Option<String>,
  code: Option<String>,
  error: Option<String>,
}

async fn callback(
  Extension(client): GoogleOauthExtension,
  Extension(jwt_client): JwtExtension,
  Extension(state): StateExtension,
  Query(query): Query<CallbackQuery>,
) -> anyhow::Result<Redirect> {
  if let Some(error) = query.error {
    return Err(anyhow!("auth error from google: {error}"));
  }
  if !client.check_state(
    &query
      .state
      .ok_or(anyhow!("callback query does not contain state"))?,
  ) {
    return Err(anyhow!("state mismatch"));
  }
  let token = client
    .get_access_token(
      &query
        .code
        .ok_or(anyhow!("callback query does not contain code"))?,
    )
    .await?;
  let google_user = client.get_google_user(&token.id_token)?;
  let google_id = google_user.id.to_string();
  let user = state
    .db
    .users
    .find_one(doc! { "google_id": &google_id }, None)
    .await
    .context("failed at find user query from mongo")?;
  let jwt = match user {
    Some(user) => jwt_client
      .generate(user.id)
      .context("failed to generate jwt")?,
    None => {
      let ts = monitor_timestamp();
      let no_users_exist = state.db.users.find_one(None, None).await?.is_none();
      let user = User {
        username: google_user
          .email
          .split("@")
          .collect::<Vec<&str>>()
          .get(0)
          .unwrap()
          .to_string(),
        avatar: google_user.picture.into(),
        google_id: google_id.into(),
        enabled: no_users_exist,
        admin: no_users_exist,
        create_server_permissions: no_users_exist,
        create_build_permissions: no_users_exist,
        created_at: ts.clone(),
        updated_at: ts,
        ..Default::default()
      };
      let user_id = state
        .db
        .users
        .create_one(user)
        .await
        .context("failed to create user on mongo")?;
      jwt_client
        .generate(user_id)
        .context("failed to generate jwt")?
    }
  };
  let exchange_token = jwt_client.create_exchange_token(jwt);
  Ok(Redirect::to(&format!(
    "{}?token={exchange_token}",
    state.config.host
  )))
}
@@ -1,188 +0,0 @@
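// JwtClient: HMAC-SHA256 signed claims with expiry, request
// authentication helpers, and the one-minute in-memory exchange-token
// map consumed by the oauth callbacks.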
use std::{
  collections::HashMap,
  sync::{Arc, Mutex},
};

use anyhow::{anyhow, Context};
use async_timing_util::{get_timelength_in_ms, unix_timestamp_ms, Timelength};
use axum::{body::Body, http::Request, Extension};
use axum_oauth2::random_string;
use hmac::{Hmac, Mac};
use jwt::{SignWithKey, VerifyWithKey};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use types::{CoreConfig, User};

use crate::state::State;

pub type JwtExtension = Extension<Arc<JwtClient>>;
pub type RequestUserExtension = Extension<Arc<RequestUser>>;

type ExchangeTokenMap = Mutex<HashMap<String, (String, u128)>>;

#[derive(Default)]
pub struct RequestUser {
  pub id: String,
  pub is_admin: bool,
  pub create_server_permissions: bool,
  pub create_build_permissions: bool,
}

#[derive(Serialize, Deserialize)]
pub struct JwtClaims {
  pub id: String,
  pub iat: u128,
  pub exp: u128,
}

pub struct JwtClient {
  key: Hmac<Sha256>,
  valid_for_ms: u128,
  exchange_tokens: ExchangeTokenMap,
}

impl JwtClient {
  pub fn extension(config: &CoreConfig) -> JwtExtension {
    let key = Hmac::new_from_slice(config.jwt_secret.as_bytes())
      .expect("failed at taking HmacSha256 of jwt secret");
    let client = JwtClient {
      key,
      valid_for_ms: get_timelength_in_ms(config.jwt_valid_for.to_string().parse().unwrap()),
      exchange_tokens: Default::default(),
    };
    Extension(Arc::new(client))
  }

  pub fn generate(&self, user_id: String) -> anyhow::Result<String> {
    let iat = unix_timestamp_ms();
    let exp = iat + self.valid_for_ms;
    let claims = JwtClaims {
      id: user_id,
      iat,
      exp,
    };
    let jwt = claims
      .sign_with_key(&self.key)
      .context("failed at signing claim")?;
    Ok(jwt)
  }

  pub async fn authenticate_check_enabled(
    &self,
    req: &Request<Body>,
  ) -> anyhow::Result<Arc<RequestUser>> {
    let jwt = req
      .headers()
      .get("authorization")
      .ok_or(anyhow!(
        "no authorization header provided. must be Bearer <jwt_token>"
      ))?
      .to_str()?
      .replace("Bearer ", "")
      .replace("bearer ", "");
    let state = req
      .extensions()
      .get::<Arc<State>>()
      .ok_or(anyhow!("failed at getting state handle"))?;
    let user = self
      .auth_jwt_check_enabled(&jwt, &state)
      .await
      .context("failed to authenticate jwt")?;
    Ok(Arc::new(user))
  }

  pub async fn auth_jwt_check_enabled(
    &self,
    jwt: &str,
    state: &State,
  ) -> anyhow::Result<RequestUser> {
    let claims: JwtClaims = jwt
      .verify_with_key(&self.key)
      .context("failed to verify claims")?;
    if claims.exp > unix_timestamp_ms() {
      let user = state
        .db
        .users
        .find_one_by_id(&claims.id)
        .await?
        .ok_or(anyhow!("did not find user with id {}", claims.id))?;
      if user.enabled {
        let user = RequestUser {
          id: claims.id,
          is_admin: user.admin,
          create_server_permissions: user.create_server_permissions,
          create_build_permissions: user.create_build_permissions,
        };
        Ok(user)
      } else {
        Err(anyhow!("user not enabled"))
      }
    } else {
      Err(anyhow!("token has expired"))
    }
  }

  pub async fn authenticate(&self, req: &Request<Body>) -> anyhow::Result<User> {
    let jwt = req
      .headers()
      .get("authorization")
      .ok_or(anyhow!(
        "no authorization header provided. must be Bearer <jwt_token>"
      ))?
      .to_str()?
      .replace("Bearer ", "")
      .replace("bearer ", "");
    let state = req
      .extensions()
      .get::<Arc<State>>()
      .ok_or(anyhow!("failed at getting state handle"))?;
    let user = self
      .auth_jwt(&jwt, &state)
      .await
      .context("failed to authenticate jwt")?;
    Ok(user)
  }

  pub async fn auth_jwt(&self, jwt: &str, state: &State) -> anyhow::Result<User> {
    let claims: JwtClaims = jwt
      .verify_with_key(&self.key)
      .context("failed to verify claims")?;
    if claims.exp > unix_timestamp_ms() {
      let user = state
        .db
        .users
        .find_one_by_id(&claims.id)
        .await?
        .ok_or(anyhow!("did not find user with id {}", claims.id))?;
      Ok(user)
    } else {
      Err(anyhow!("token has expired"))
    }
  }

  pub fn create_exchange_token(&self, jwt: String) -> String {
    let exchange_token = random_string(40);
    self.exchange_tokens.lock().unwrap().insert(
      exchange_token.clone(),
      (
        jwt,
        unix_timestamp_ms() + get_timelength_in_ms(Timelength::OneMinute),
      ),
    );
    exchange_token
  }

  pub fn redeem_exchange_token(&self, exchange_token: &str) -> anyhow::Result<String> {
    let (jwt, valid_until) = self
      .exchange_tokens
      .lock()
      .unwrap()
      .remove(exchange_token)
      .ok_or(anyhow!("invalid exchange token: unrecognized"))?;
    if unix_timestamp_ms() < valid_until {
      Ok(jwt)
    } else {
      Err(anyhow!("invalid exchange token: expired"))
    }
  }
}
@@ -1,98 +0,0 @@
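// Local username/password auth with bcrypt-hashed passwords. The first
// user ever created is auto-enabled as admin; everyone after that starts
// disabled until an admin enables them.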
use anyhow::{anyhow, Context};
use axum::{extract::Json, routing::post, Extension, Router};
use helpers::handle_anyhow_error;
use mungos::mongodb::bson::doc;
use types::{monitor_timestamp, User, UserCredentials};

use crate::state::StateExtension;

use super::jwt::JwtExtension;

const BCRYPT_COST: u32 = 10;

pub fn router() -> Router {
  Router::new()
    .route(
      "/create_user",
      post(|db, jwt, body| async {
        create_user_handler(db, jwt, body)
          .await
          .map_err(handle_anyhow_error)
      }),
    )
    .route(
      "/login",
      post(|db, jwt, body| async {
        login_handler(db, jwt, body)
          .await
          .map_err(handle_anyhow_error)
      }),
    )
}

async fn create_user_handler(
  Extension(state): StateExtension,
  Extension(jwt): JwtExtension,
  Json(UserCredentials { username, password }): Json<UserCredentials>,
) -> anyhow::Result<String> {
  let password = bcrypt::hash(password, BCRYPT_COST).context("failed to hash password")?;

  let no_users_exist = state.db.users.find_one(None, None).await?.is_none();

  let ts = monitor_timestamp();

  let user = User {
    username,
    password: Some(password),
    enabled: no_users_exist,
    admin: no_users_exist,
    create_server_permissions: no_users_exist,
    create_build_permissions: no_users_exist,
    created_at: ts.clone(),
    updated_at: ts,
    ..Default::default()
  };

  let user_id = state
    .db
    .users
    .create_one(user)
    .await
    .context("failed to create user")?;

  let jwt = jwt
    .generate(user_id)
    .context("failed to generate jwt for user")?;

  Ok(jwt)
}

async fn login_handler(
  Extension(state): StateExtension,
  Extension(jwt): JwtExtension,
  Json(UserCredentials { username, password }): Json<UserCredentials>,
) -> anyhow::Result<String> {
  let user = state
    .db
    .users
    .find_one(doc! { "username": &username }, None)
    .await
    .context("failed at mongo query")?
    .ok_or(anyhow!("did not find user with username {username}"))?;

  let user_pw_hash = user
    .password
    .ok_or(anyhow!("invalid login, user does not have password login"))?;

  let verified = bcrypt::verify(password, &user_pw_hash).context("failed at verify password")?;

  if !verified {
    return Err(anyhow!("invalid credentials"));
  }

  let jwt = jwt
    .generate(user.id)
    .context("failed at generating jwt for user")?;

  Ok(jwt)
}
@@ -1,109 +0,0 @@
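// Auth module root: builds the /auth router, advertises which login
// methods are enabled via /options, nests the method-specific routers,
// and provides the auth_request middleware plus the exchange endpoint.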
use std::sync::Arc;

use axum::{
  body::Body,
  http::{Request, StatusCode},
  middleware::Next,
  response::Response,
  routing::{get, post},
  Extension, Json, Router,
};
use helpers::handle_anyhow_error;
use serde::{Deserialize, Serialize};
use types::CoreConfig;
use typeshare::typeshare;

mod github;
mod google;
mod jwt;
mod local;
mod secret;

use crate::state::StateExtension;

pub use self::jwt::{JwtClaims, JwtClient, JwtExtension, RequestUser, RequestUserExtension};

#[typeshare]
#[derive(Serialize)]
struct LoginOptions {
  local: bool,
  github: bool,
  google: bool,
}

pub fn router(config: &CoreConfig) -> Router {
  let mut router = Router::new()
    .route(
      "/options",
      get(|Extension(state): StateExtension| async move {
        Json(LoginOptions {
          local: state.config.local_auth,
          github: state.config.github_oauth.enabled
            && state.config.github_oauth.id.len() > 0
            && state.config.github_oauth.secret.len() > 0,
          google: state.config.google_oauth.enabled
            && state.config.google_oauth.id.len() > 0
            && state.config.google_oauth.secret.len() > 0,
        })
      }),
    )
    .route(
      "/exchange",
      post(|jwt, body| async {
        exchange_for_jwt(jwt, body)
          .await
          .map_err(handle_anyhow_error)
      }),
    )
    .nest("/secret", secret::router());

  if config.local_auth {
    router = router.nest("/local", local::router());
  }

  if config.github_oauth.enabled
    && config.github_oauth.id.len() > 0
    && config.github_oauth.secret.len() > 0
  {
    router = router.nest("/github", github::router(config));
  }

  if config.google_oauth.enabled
    && config.google_oauth.id.len() > 0
    && config.google_oauth.secret.len() > 0
  {
    router = router.nest("/google", google::router(config));
  }

  router
}

#[typeshare]
#[derive(Deserialize)]
struct TokenExchangeBody {
  token: String,
}

async fn exchange_for_jwt(
  Extension(jwt): JwtExtension,
  Json(body): Json<TokenExchangeBody>,
) -> anyhow::Result<String> {
  let jwt = jwt.redeem_exchange_token(&body.token)?;
  Ok(jwt)
}

pub async fn auth_request(
  mut req: Request<Body>,
  next: Next<Body>,
) -> Result<Response, (StatusCode, String)> {
  let jwt_client = req.extensions().get::<Arc<JwtClient>>().ok_or((
    StatusCode::UNAUTHORIZED,
    "failed to get jwt client extension".to_string(),
  ))?;
  let user = jwt_client
    .authenticate_check_enabled(&req)
    .await
    .map_err(|e| (StatusCode::UNAUTHORIZED, format!("{e:#?}")))?;
  req.extensions_mut().insert(user);
  Ok(next.run(req).await)
}
@@ -1,66 +0,0 @@
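// Login with an API secret: verifies against the stored bcrypt hashes,
// pruning expired secrets from the user document as they are encountered.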
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::{routing::post, Extension, Json, Router};
use helpers::handle_anyhow_error;
use mungos::{
  mongodb::bson::{doc, Document},
  Update,
};
use serde::Deserialize;
use types::unix_from_monitor_ts;

use crate::state::StateExtension;

use super::JwtExtension;

#[derive(Deserialize)]
pub struct SecretLoginBody {
  username: String,
  secret: String,
}

pub fn router() -> Router {
  Router::new().route(
    "/login",
    post(|db, jwt, body| async { login(db, jwt, body).await.map_err(handle_anyhow_error) }),
  )
}

pub async fn login(
  Extension(state): StateExtension,
  Extension(jwt): JwtExtension,
  Json(SecretLoginBody { username, secret }): Json<SecretLoginBody>,
) -> anyhow::Result<String> {
  let user = state
    .db
    .users
    .find_one(doc! { "username": &username }, None)
    .await
    .context("failed at mongo query")?
    .ok_or(anyhow!("did not find user with username {username}"))?;
  let ts = unix_timestamp_ms() as i64;
  for s in user.secrets {
    if let Some(expires) = s.expires {
      let expires = unix_from_monitor_ts(&expires)?;
      if expires < ts {
        state
          .db
          .users
          .update_one::<Document>(
            &user.id,
            Update::Custom(doc! { "$pull": { "secrets": { "name": s.name } } }),
          )
          .await
          .context("failed to remove expired secret")?;
        continue;
      }
    }
    if bcrypt::verify(&secret, &s.hash).context("failed at verifying hash")? {
      let jwt = jwt
        .generate(user.id)
        .context("failed at generating jwt for user")?;
      return Ok(jwt);
    }
  }
  Err(anyhow!("invalid secret"))
}
@@ -1,199 +0,0 @@
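// AWS EC2 helpers, which appear to back the builder-instance flow:
// create an instance from an AMI, poll until it reports Running, and
// wrap the resulting address in a Server pointed at the periphery port.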
use std::time::Duration;

use anyhow::{anyhow, Context};
use aws_sdk_ec2::{
  config::Region,
  types::{
    BlockDeviceMapping, EbsBlockDevice, InstanceNetworkInterfaceSpecification,
    InstanceStateChange, InstanceStateName, InstanceStatus, InstanceType, ResourceType, Tag,
    TagSpecification,
  },
  Client,
};
use types::Server;

pub async fn create_ec2_client(
  region: String,
  access_key_id: &str,
  secret_access_key: String,
) -> Client {
  // There may be a better way to pass these keys to client
  std::env::set_var("AWS_ACCESS_KEY_ID", access_key_id);
  std::env::set_var("AWS_SECRET_ACCESS_KEY", secret_access_key);
  let region = Region::new(region);
  let config = aws_config::from_env().region(region).load().await;
  let client = Client::new(&config);
  client
}

pub struct Ec2Instance {
  pub instance_id: String,
  pub server: Server,
}

const POLL_RATE_SECS: u64 = 2;
const MAX_POLL_TRIES: usize = 30;

/// this will only resolve after the instance is running
/// should still poll the periphery agent after creation
pub async fn create_instance_with_ami(
  client: &Client,
  instance_name: &str,
  ami_id: &str,
  instance_type: &str,
  subnet_id: &str,
  security_group_ids: Vec<String>,
  volume_size_gb: i32,
  key_pair_name: &str,
  assign_public_ip: bool,
) -> anyhow::Result<Ec2Instance> {
  let instance_type = InstanceType::from(instance_type);
  if let InstanceType::Unknown(t) = instance_type {
    return Err(anyhow!("unknown instance type {t:?}"));
  }
  let res = client
    .run_instances()
    .image_id(ami_id)
    .instance_type(instance_type)
    .block_device_mappings(
      BlockDeviceMapping::builder()
        .set_device_name(String::from("/dev/sda1").into())
        .set_ebs(
          EbsBlockDevice::builder()
            .volume_size(volume_size_gb)
            .build()
            .into(),
        )
        .build(),
    )
    .network_interfaces(
      InstanceNetworkInterfaceSpecification::builder()
        .subnet_id(subnet_id)
        .associate_public_ip_address(assign_public_ip)
        .set_groups(security_group_ids.into())
        .device_index(0)
        .build(),
    )
    .key_name(key_pair_name)
    .tag_specifications(
      TagSpecification::builder()
        .tags(Tag::builder().key("Name").value(instance_name).build())
        .resource_type(ResourceType::Instance)
        .build(),
    )
    .min_count(1)
    .max_count(1)
    .send()
    .await
    .context("failed to start builder ec2 instance")?;
  let instance = res
    .instances()
    .ok_or(anyhow!("got None for created instances"))?
    .get(0)
    .ok_or(anyhow!("instances array is empty"))?;
  let instance_id = instance
    .instance_id()
    .ok_or(anyhow!("instance does not have instance_id"))?
    .to_string();
  for _ in 0..MAX_POLL_TRIES {
    let state_name = get_ec2_instance_state_name(&client, &instance_id).await?;
    if state_name == Some(InstanceStateName::Running) {
      let ip = if assign_public_ip {
        get_ec2_instance_public_ip(client, &instance_id).await?
      } else {
        instance
          .private_ip_address()
          .ok_or(anyhow!("instance does not have private ip"))?
          .to_string()
      };
      let server = Server {
        address: format!("http://{ip}:8000"),
        ..Default::default()
      };
      return Ok(Ec2Instance {
        instance_id,
        server,
      });
    }
    tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
  }
  Err(anyhow!("instance not running after polling"))
}

pub async fn get_ec2_instance_status(
  client: &Client,
  instance_id: &str,
) -> anyhow::Result<Option<InstanceStatus>> {
  let status = client
    .describe_instance_status()
    .instance_ids(instance_id)
    .send()
    .await
    .context("failed to get instance status from aws")?
    .instance_statuses()
    .ok_or(anyhow!("instance statuses is None"))?
    .get(0)
    .map(|s| s.to_owned());
  Ok(status)
}

pub async fn get_ec2_instance_state_name(
  client: &Client,
  instance_id: &str,
) -> anyhow::Result<Option<InstanceStateName>> {
  let status = get_ec2_instance_status(client, instance_id).await?;
  if status.is_none() {
    return Ok(None);
  }
  let state = status
    .unwrap()
    .instance_state()
    .ok_or(anyhow!("instance state is None"))?
    .name()
    .ok_or(anyhow!("instance state name is None"))?
    .to_owned();
  Ok(Some(state))
}

pub async fn get_ec2_instance_public_ip(
  client: &Client,
  instance_id: &str,
) -> anyhow::Result<String> {
  let ip = client
    .describe_instances()
    .instance_ids(instance_id)
    .send()
    .await
    .context("failed to get instance status from aws")?
    .reservations()
    .ok_or(anyhow!("instance reservations is None"))?
    .get(0)
    .ok_or(anyhow!("instance reservations is empty"))?
    .instances()
    .ok_or(anyhow!("instances is None"))?
    .get(0)
    .ok_or(anyhow!("instances is empty"))?
    .public_ip_address()
    .ok_or(anyhow!("instance has no public ip"))?
|
||||
.to_string();
|
||||
|
||||
Ok(ip)
|
||||
}
|
||||
|
||||
pub async fn terminate_ec2_instance(
|
||||
client: &Client,
|
||||
instance_id: &str,
|
||||
) -> anyhow::Result<InstanceStateChange> {
|
||||
let res = client
|
||||
.terminate_instances()
|
||||
.instance_ids(instance_id)
|
||||
.send()
|
||||
.await
|
||||
.context("failed to terminate instance from aws")?
|
||||
.terminating_instances()
|
||||
.ok_or(anyhow!("terminating instances is None"))?
|
||||
.get(0)
|
||||
.ok_or(anyhow!("terminating instances is empty"))?
|
||||
.to_owned();
|
||||
Ok(res)
|
||||
}
|
||||
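For reference, a minimal sketch of how the helpers above fit together. Every literal below (region, credentials, ami, subnet, security group, key pair) is a placeholder for illustration, not a value from the original source:

```rust
// hypothetical wiring of create_ec2_client + create_instance_with_ami
async fn launch_builder() -> anyhow::Result<Ec2Instance> {
    let client = create_ec2_client(
        "us-east-1".to_string(),        // region (placeholder)
        "AKIAXXXXXXXXXXXXXXXX",         // access_key_id (placeholder)
        "xxxxxxxx".to_string(),         // secret_access_key (placeholder)
    )
    .await;
    create_instance_with_ami(
        &client,
        "monitor-builder",              // instance_name, becomes the Name tag
        "ami-0123456789abcdef0",        // ami_id (placeholder)
        "m5.large",                     // instance_type
        "subnet-0123456789abcdef0",     // subnet_id (placeholder)
        vec!["sg-0123456789abcdef0".to_string()], // security_group_ids (placeholder)
        20,                             // volume_size_gb
        "my-key-pair",                  // key_pair_name (placeholder)
        true,                           // assign_public_ip
    )
    .await
}
```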
@@ -1 +0,0 @@
pub mod aws;
@@ -1,37 +0,0 @@
use axum::Router;
use dotenv::dotenv;
use merge_config_files::parse_config_file;
use serde::Deserialize;
use tower_http::services::{ServeDir, ServeFile};
use types::CoreConfig;

type SpaRouter = Router;

#[derive(Deserialize, Debug)]
struct Env {
    #[serde(default = "default_config_path")]
    pub config_path: String,
    #[serde(default = "default_frontend_path")]
    pub frontend_path: String,
}

pub fn load() -> (CoreConfig, SpaRouter, ServeFile) {
    dotenv().ok();
    let env: Env = envy::from_env().expect("failed to parse environment variables");
    let config = parse_config_file(env.config_path).expect("failed to parse config");
    let spa_router = Router::new().nest_service(
        "/assets",
        ServeDir::new(&env.frontend_path)
            .not_found_service(ServeFile::new(format!("{}/index.html", env.frontend_path))),
    );
    let index_html_service = ServeFile::new(format!("{}/index.html", env.frontend_path));
    (config, spa_router, index_html_service)
}

pub fn default_config_path() -> String {
    "/config/config.toml".to_string()
}

fn default_frontend_path() -> String {
    "/frontend".to_string()
}
@@ -1,103 +0,0 @@
use std::{collections::HashMap, str::FromStr};

use anyhow::anyhow;
use diff::{Diff, OptionDiff};
use helpers::to_monitor_name;
use tokio::sync::RwLock;
use types::{traits::Busy, Build};

#[macro_export]
macro_rules! response {
    ($x:expr) => {
        Ok::<_, (axum::http::StatusCode, String)>($x)
    };
}

pub fn option_diff_is_some<T: Diff>(diff: &OptionDiff<T>) -> bool
where
    <T as Diff>::Repr: PartialEq,
{
    diff != &OptionDiff::NoChange && diff != &OptionDiff::None
}

pub fn any_option_diff_is_some<T: Diff>(diffs: &[&OptionDiff<T>]) -> bool
where
    <T as Diff>::Repr: PartialEq,
{
    for diff in diffs {
        if diff != &&OptionDiff::NoChange && diff != &&OptionDiff::None {
            return true;
        }
    }
    false
}

pub fn parse_comma_seperated_list<T: FromStr>(comma_sep_list: &str) -> anyhow::Result<Vec<T>> {
    comma_sep_list
        .split(",")
        .filter(|item| item.len() > 0)
        .map(|item| {
            let item = item
                .parse()
                .map_err(|_| anyhow!("error parsing string {item} into type T"))?;
            Ok::<T, anyhow::Error>(item)
        })
        .collect()
}

pub fn get_image_name(build: &Build) -> String {
    let name = to_monitor_name(&build.name);
    match &build.docker_organization {
        Some(org) => format!("{org}/{name}"),
        None => match &build.docker_account {
            Some(acct) => format!("{acct}/{name}"),
            None => name,
        },
    }
}

pub fn empty_or_only_spaces(word: &str) -> bool {
    if word.len() == 0 {
        return true;
    }
    for char in word.chars() {
        if char != ' ' {
            return false;
        }
    }
    true
}

#[derive(Default)]
pub struct Cache<T: Clone + Default> {
    cache: RwLock<HashMap<String, T>>,
}

impl<T: Clone + Default> Cache<T> {
    pub async fn get(&self, key: &str) -> Option<T> {
        self.cache.read().await.get(key).map(|e| e.clone())
    }

    pub async fn get_or_default(&self, key: String) -> T {
        let mut cache = self.cache.write().await;
        cache.entry(key).or_default().clone()
    }

    pub async fn update_entry(&self, key: String, handler: impl Fn(&mut T) -> ()) {
        let mut cache = self.cache.write().await;
        handler(cache.entry(key).or_default());
    }

    pub async fn clear(&self) {
        self.cache.write().await.clear();
    }
}

impl<T: Clone + Default + Busy> Cache<T> {
    pub async fn busy(&self, id: &str) -> bool {
        match self.get(id).await {
            Some(state) => state.busy(),
            None => false,
        }
    }
}
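A minimal usage sketch for the Cache helper above; the key names here are placeholders:

```rust
// hypothetical usage of the Cache helper defined above
async fn cache_example() {
    let cache: Cache<bool> = Default::default();
    // mutate an entry in place, creating it from Default if missing
    cache
        .update_entry("server_1".to_string(), |busy| *busy = true)
        .await;
    assert_eq!(cache.get("server_1").await, Some(true));
    // reads of unknown keys via get_or_default fall back to T::default()
    assert_eq!(cache.get_or_default("server_2".to_string()).await, false);
    cache.clear().await;
    assert_eq!(cache.get("server_1").await, None);
}
```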
@@ -1,63 +0,0 @@
// #![allow(unused)]

use ::helpers::get_socket_addr;
use auth::JwtClient;
use axum::{http::StatusCode, Router};
use state::State;
use termination_signal::tokio::immediate_term_handle;
use tower_http::cors::{Any, CorsLayer};

mod actions;
mod api;
mod auth;
mod cloud;
mod config;
mod helpers;
mod monitoring;
mod state;
mod ws;

type ResponseResult<T> = Result<T, (StatusCode, String)>;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    println!("version: v{}", env!("CARGO_PKG_VERSION"));

    let term_signal = immediate_term_handle()?;

    let app = tokio::spawn(async move {
        let (config, spa_router, index_html_service) = config::load();

        println!("starting monitor core on port {}...", config.port);

        let app = Router::new()
            .nest("/api", api::router())
            .nest("/auth", auth::router(&config))
            .nest("/ws", ws::router())
            .layer(JwtClient::extension(&config))
            .layer(State::extension(config.clone()).await)
            .merge(spa_router)
            .fallback_service(index_html_service)
            .layer(
                CorsLayer::new()
                    .allow_origin(Any)
                    .allow_methods(Any)
                    .allow_headers(Any),
            );

        println!("started monitor core on port {}", config.port);

        axum::Server::bind(&get_socket_addr(config.port))
            .serve(app.into_make_service())
            .await?;

        anyhow::Ok(())
    });

    tokio::select! {
        res = app => return res?,
        _ = term_signal => {},
    }

    Ok(())
}
@@ -1,537 +0,0 @@
use std::{cmp::Ordering, collections::HashMap, path::PathBuf};

use async_timing_util::{
    unix_timestamp_ms, wait_until_timelength, Timelength, ONE_DAY_MS, ONE_HOUR_MS,
};
use futures_util::future::join_all;
use mungos::mongodb::bson::doc;
use slack::types::Block;
use types::{Server, SystemStats, SystemStatsQuery, SystemStatsRecord};

use crate::state::State;

#[derive(Default, Clone)]
pub struct AlertStatus {
    cpu_alert: bool,
    mem_alert: bool,
    disk_alert: HashMap<PathBuf, bool>,
    component_alert: bool,
}

impl State {
    pub async fn collect_server_stats(&self) {
        loop {
            let ts = wait_until_timelength(
                self.config.monitoring_interval.to_string().parse().unwrap(),
                0,
            )
            .await as i64;
            let servers = self.get_enabled_servers_with_stats().await;
            if let Err(e) = &servers {
                eprintln!("failed to get server list from db: {e:?}");
                continue;
            }
            for (server, res) in servers.unwrap() {
                if let Err(_) = res {
                    // server is unreachable; notify slack if configured
                    if let Some(slack) = &self.slack {
                        let (header, info) = generate_unreachable_message(&server);
                        let res = slack.send_message_with_header(&header, info.clone()).await;
                        if let Err(e) = res {
                            eprintln!("failed to send message to slack: {e} | header: {header} | info: {info:?}")
                        }
                    }
                    continue;
                }
                let stats = res.unwrap();
                self.check_server_stats(&server, &stats).await;
                let res = self
                    .db
                    .stats
                    .create_one(SystemStatsRecord::from_stats(server.id, ts, stats))
                    .await;
                if let Err(e) = res {
                    eprintln!("failed to insert stats into mongo | {e}");
                }
            }
        }
    }

    pub async fn prune_stats_on_mongo(&self) {
        let days_ago_ms = self.config.keep_stats_for_days as u128 * ONE_DAY_MS;
        loop {
            let ts = wait_until_timelength(Timelength::OneDay, 0).await;
            let delete_before_ts = ts - days_ago_ms;
            let res = self
                .db
                .stats
                .delete_many(doc! { "ts": { "$lte": delete_before_ts as i64 } })
                .await;
            if let Err(e) = res {
                eprintln!("{ts} | failed to delete old stats | {e:?}");
            }
        }
    }

    async fn get_enabled_servers_with_stats(
        &self,
    ) -> anyhow::Result<Vec<(Server, anyhow::Result<SystemStats>)>> {
        let servers = self
            .db
            .servers
            .get_some(doc! { "enabled": true }, None)
            .await?;

        let futures = servers.into_iter().map(|server| async move {
            let stats = self
                .periphery
                .get_system_stats(&server, &SystemStatsQuery::all())
                .await;
            (server, stats)
        });

        Ok(join_all(futures).await)
    }

    async fn check_server_stats(&self, server: &Server, stats: &SystemStats) {
        self.check_cpu(server, stats).await;
        self.check_mem(server, stats).await;
        self.check_disk(server, stats).await;
        self.check_components(server, stats).await;
    }

    async fn check_cpu(&self, server: &Server, stats: &SystemStats) {
        if self.slack.is_none()
            || self
                .server_alert_status
                .get(&server.id)
                .await
                .map(|s| s.cpu_alert)
                .unwrap_or(false)
        {
            return;
        }
        if stats.cpu_perc > server.cpu_alert {
            let region = if let Some(region) = &server.region {
                format!(" ({region})")
            } else {
                String::new()
            };
            let mut top_procs = stats.processes.clone();
            top_procs.sort_by(|a, b| {
                if a.cpu_perc > b.cpu_perc {
                    Ordering::Less
                } else {
                    Ordering::Greater
                }
            });
            let top_procs = top_procs
                .into_iter()
                .take(3)
                .enumerate()
                .map(|(i, p)| {
                    format!(
                        "\n{}. *{}* | *{:.1}%* CPU | *{:.1} GiB* MEM",
                        i + 1,
                        p.name,
                        p.cpu_perc,
                        p.mem_mb / 1024.0,
                    )
                })
                .collect::<Vec<_>>()
                .join("");
            let mut blocks = vec![
                Block::header("WARNING 🚨"),
                Block::section(format!(
                    "*{}*{region} has high *CPU usage* 📈 🚨",
                    server.name
                )),
                Block::section(format!("cpu: *{:.1}%*", stats.cpu_perc)),
                Block::section(format!("*top cpu processes*{top_procs}",)),
            ];

            if let Some(to_notify) = generate_to_notify(server) {
                blocks.push(Block::section(to_notify))
            }

            let res = self
                .slack
                .as_ref()
                .unwrap()
                .send_message(
                    format!(
                        "WARNING 🚨 | *{}*{region} has high *CPU usage* 📈 🚨",
                        server.name
                    ),
                    blocks,
                )
                .await;
            if let Err(e) = res {
                eprintln!(
                    "failed to send message to slack | high cpu usage on {} | usage: {:.1}% | {e:?}",
                    server.name, stats.cpu_perc
                )
            } else {
                self.server_alert_status
                    .update_entry(server.id.clone(), |entry| {
                        entry.cpu_alert = true;
                    })
                    .await;
            }
        }
    }

    async fn check_mem(&self, server: &Server, stats: &SystemStats) {
        if self.slack.is_none()
            || self
                .server_alert_status
                .get(&server.id)
                .await
                .map(|s| s.mem_alert)
                .unwrap_or(false)
        {
            return;
        }
        let usage_perc = (stats.mem_used_gb / stats.mem_total_gb) * 100.0;
        if usage_perc > server.mem_alert {
            let region = if let Some(region) = &server.region {
                format!(" ({region})")
            } else {
                String::new()
            };
            let mut top_procs = stats.processes.clone();
            top_procs.sort_by(|a, b| {
                if a.mem_mb > b.mem_mb {
                    Ordering::Less
                } else {
                    Ordering::Greater
                }
            });
            let top_procs = top_procs
                .into_iter()
                .take(3)
                .enumerate()
                .map(|(i, p)| {
                    format!(
                        "\n{}. *{}* | *{:.1}%* CPU | *{:.1} GiB* MEM",
                        i + 1,
                        p.name,
                        p.cpu_perc,
                        p.mem_mb / 1024.0,
                    )
                })
                .collect::<Vec<_>>()
                .join("");
            let mut blocks = vec![
                Block::header("WARNING 🚨"),
                Block::section(format!(
                    "*{}*{region} has high *memory usage* 💾 🚨",
                    server.name
                )),
                Block::section(format!(
                    "memory: used *{:.2} GB* of *{:.2} GB* (*{:.1}%*)",
                    stats.mem_used_gb, stats.mem_total_gb, usage_perc
                )),
                Block::section(format!("*top mem processes*{top_procs}",)),
            ];

            if let Some(to_notify) = generate_to_notify(server) {
                blocks.push(Block::section(to_notify))
            }

            let res = self
                .slack
                .as_ref()
                .unwrap()
                .send_message(
                    format!(
                        "WARNING 🚨 | *{}*{region} has high *memory usage* 💾 🚨",
                        server.name
                    ),
                    blocks,
                )
                .await;
            if let Err(e) = res {
                eprintln!(
                    "failed to send message to slack | high mem usage on {} | usage: {:.2}GB of {:.2}GB | {e:?}",
                    server.name, stats.mem_used_gb, stats.mem_total_gb,
                )
            } else {
                self.server_alert_status
                    .update_entry(server.id.clone(), |entry| {
                        entry.mem_alert = true;
                    })
                    .await;
            }
        }
    }

    async fn check_disk(&self, server: &Server, stats: &SystemStats) {
        for disk in &stats.disk.disks {
            // skip this disk if slack is not configured
            // or an alert was already sent for this mount point
            if self.slack.is_none()
                || self
                    .server_alert_status
                    .get(&server.id)
                    .await
                    .map(|s| *s.disk_alert.get(&disk.mount).unwrap_or(&false))
                    .unwrap_or(false)
            {
                continue;
            }
            let usage_perc = (disk.used_gb / disk.total_gb) * 100.0;
            if usage_perc > server.disk_alert {
                let region = if let Some(region) = &server.region {
                    format!(" ({region})")
                } else {
                    String::new()
                };
                let mut blocks = vec![
                    Block::header("WARNING 🚨"),
                    Block::section(format!(
                        "*{}*{region} has high *disk usage* (mount point *{}*) 💿 🚨",
                        server.name,
                        disk.mount.display()
                    )),
                    Block::section(format!(
                        "disk: used *{:.2} GB* of *{:.2} GB* (*{:.1}%*)",
                        disk.used_gb, disk.total_gb, usage_perc
                    )),
                ];

                if let Some(to_notify) = generate_to_notify(server) {
                    blocks.push(Block::section(to_notify))
                }

                let res = self
                    .slack
                    .as_ref()
                    .unwrap()
                    .send_message(
                        format!(
                            "WARNING 🚨 | *{}*{region} has high *disk usage* 💿 🚨",
                            server.name
                        ),
                        blocks,
                    )
                    .await;
                if let Err(e) = res {
                    eprintln!(
                        "failed to send message to slack | high disk usage on {} | usage: {:.2}GB of {:.2}GB | {e:?}",
                        server.name, stats.disk.used_gb, stats.disk.total_gb,
                    )
                } else {
                    self.server_alert_status
                        .update_entry(server.id.clone(), |entry| {
                            entry.disk_alert.insert(disk.mount.clone(), true);
                        })
                        .await;
                }
            }
        }
    }

    async fn check_components(&self, server: &Server, stats: &SystemStats) {
        if self.slack.is_none()
            || self
                .server_alert_status
                .get(&server.id)
                .await
                .map(|s| s.component_alert)
                .unwrap_or(false)
        {
            return;
        }
        let info = stats
            .components
            .iter()
            .map(|c| {
                if let Some(critical) = c.critical {
                    if c.temp / critical > 0.85 {
                        format!(
                            "{}: *{:.1}°* (*{:.1}%* to critical) 🌡️",
                            c.label,
                            c.temp,
                            (c.temp / critical) * 100.0
                        )
                    } else {
                        String::new()
                    }
                } else {
                    String::new()
                }
            })
            .filter(|s| !s.is_empty())
            .collect::<Vec<_>>();
        if info.len() > 0 {
            let region = if let Some(region) = &server.region {
                format!(" ({region})")
            } else {
                String::new()
            };
            let mut blocks = vec![
                Block::header("WARNING 🚨"),
                Block::section(format!(
                    "*{}*{region} has high *temperature* 🌡️ 🚨",
                    server.name
                )),
                Block::section(info.join("\n")),
            ];

            if let Some(to_notify) = generate_to_notify(server) {
                blocks.push(Block::section(to_notify))
            }

            let res = self
                .slack
                .as_ref()
                .unwrap()
                .send_message(
                    format!(
                        "WARNING 🚨 | *{}*{region} has high *temperature* 🌡️ 🚨",
                        server.name
                    ),
                    blocks,
                )
                .await;
            if let Err(e) = res {
                eprintln!(
                    "failed to send message to slack | high temperature on {} | {} | {e:?}",
                    server.name,
                    info.join(" | "),
                )
            } else {
                self.server_alert_status
                    .update_entry(server.id.clone(), |entry| {
                        entry.component_alert = true;
                    })
                    .await;
            }
        }
    }

    pub async fn daily_update(&self) {
        let offset = self.config.daily_offset_hours as u128 * ONE_HOUR_MS;
        loop {
            wait_until_timelength(Timelength::OneDay, offset).await;
            let servers = self.get_enabled_servers_with_stats().await;
            if let Err(e) = &servers {
                eprintln!(
                    "{} | failed to get servers with stats for daily update | {e:#?}",
                    unix_timestamp_ms()
                );
                continue;
            }
            let servers = servers.unwrap();
            if servers.is_empty() {
                continue;
            }
            let mut blocks = vec![Block::header("INFO | daily update"), Block::divider()];
            for (server, stats) in servers {
                let region = if let Some(region) = &server.region {
                    format!(" | {region}")
                } else {
                    String::new()
                };
                if let Ok(stats) = stats {
                    let cpu_warning = if stats.cpu_perc > server.cpu_alert {
                        " 🚨"
                    } else {
                        ""
                    };
                    let mem_warning =
                        if (stats.mem_used_gb / stats.mem_total_gb) * 100.0 > server.mem_alert {
                            " 🚨"
                        } else {
                            ""
                        };
                    let disk_warning =
                        if (stats.disk.used_gb / stats.disk.total_gb) * 100.0 > server.disk_alert {
                            " 🚨"
                        } else {
                            ""
                        };
                    let status = if !cpu_warning.is_empty()
                        || !mem_warning.is_empty()
                        || !disk_warning.is_empty()
                    {
                        "*WARNING* 🚨"
                    } else {
                        "*OK* ✅"
                    };
                    let name_line = format!("*{}*{region} | {status}", server.name);
                    let cpu_line = format!("CPU: *{:.1}%*{cpu_warning}", stats.cpu_perc);
                    let mem_line = format!(
                        "MEM: *{:.1}%* ({:.2} GB of {:.2} GB){mem_warning}",
                        (stats.mem_used_gb / stats.mem_total_gb) * 100.0,
                        stats.mem_used_gb,
                        stats.mem_total_gb,
                    );
                    let disk_line = format!(
                        "DISK: *{:.1}%* ({:.2} GB of {:.2} GB){disk_warning}",
                        (stats.disk.used_gb / stats.disk.total_gb) * 100.0,
                        stats.disk.used_gb,
                        stats.disk.total_gb,
                    );
                    blocks.push(Block::section(format!(
                        "{name_line}\n{cpu_line}\n{mem_line}\n{disk_line}",
                    )));
                } else {
                    blocks.push(Block::section(format!(
                        "*{}*{region} | *UNREACHABLE* ❌",
                        server.name
                    )));
                }
                blocks.push(Block::divider())
            }
            let res = self
                .slack
                .as_ref()
                .unwrap()
                .send_message(format!("INFO | daily update"), blocks)
                .await;
            if let Err(e) = res {
                eprintln!(
                    "{} | failed to send daily update message | {e:?}",
                    unix_timestamp_ms()
                );
            }
            // reset alert statuses for the next day
            self.server_alert_status.clear().await;
        }
    }
}

fn generate_unreachable_message(server: &Server) -> (String, Option<String>) {
    let region = match &server.region {
        Some(region) => format!(" ({region})"),
        None => String::new(),
    };
    let header = format!("WARNING 🚨 | {}{region} is unreachable ❌", server.name);
    let to_notify = server
        .to_notify
        .iter()
        .map(|u| format!("<@{u}>"))
        .collect::<Vec<_>>()
        .join(" ");
    let info = if to_notify.len() > 0 {
        Some(to_notify)
    } else {
        None
    };
    (header, info)
}

fn generate_to_notify(server: &Server) -> Option<String> {
    if server.to_notify.len() > 0 {
        Some(
            server
                .to_notify
                .iter()
                .map(|u| format!("<@{u}>"))
                .collect::<Vec<String>>()
                .join(" "),
        )
    } else {
        None
    }
}
@@ -1,87 +0,0 @@
use std::sync::Arc;

use async_timing_util::{unix_timestamp_ms, wait_until_timelength, Timelength, ONE_HOUR_MS};
use axum::Extension;
use db::DbClient;
use futures_util::future::join_all;
use mungos::mongodb::bson::doc;
use periphery::PeripheryClient;
use types::{
    BuildActionState, CommandActionState, CoreConfig, DeploymentActionState, ServerActionState,
};

use crate::{helpers::Cache, monitoring::AlertStatus, ws::update::UpdateWsChannel};

pub type StateExtension = Extension<Arc<State>>;

pub struct State {
    pub config: CoreConfig,
    pub db: DbClient,
    pub update: UpdateWsChannel,
    pub periphery: PeripheryClient,
    pub slack: Option<slack::Client>,
    pub build_action_states: Cache<BuildActionState>,
    pub deployment_action_states: Cache<DeploymentActionState>,
    pub server_action_states: Cache<ServerActionState>,
    pub command_action_states: Cache<CommandActionState>,
    pub server_alert_status: Cache<AlertStatus>, // (server_id, AlertStatus)
}

impl State {
    pub async fn new(config: CoreConfig) -> Arc<State> {
        let state = State {
            db: DbClient::new(config.mongo.clone()).await,
            slack: config.slack_url.clone().map(|url| slack::Client::new(&url)),
            periphery: PeripheryClient::new(config.passkey.clone()),
            config,
            update: UpdateWsChannel::new(),
            build_action_states: Default::default(),
            deployment_action_states: Default::default(),
            server_action_states: Default::default(),
            command_action_states: Default::default(),
            server_alert_status: Default::default(),
        };
        let state = Arc::new(state);
        let state_clone = state.clone();
        tokio::spawn(async move { state_clone.collect_server_stats().await });
        let state_clone = state.clone();
        tokio::spawn(async move { state_clone.daily_image_prune().await });
        if state.slack.is_some() {
            let state_clone = state.clone();
            tokio::spawn(async move { state_clone.daily_update().await });
        }
        if state.config.keep_stats_for_days != 0 {
            let state_clone = state.clone();
            tokio::spawn(async move { state_clone.prune_stats_on_mongo().await });
        }
        state
    }

    pub async fn extension(config: CoreConfig) -> StateExtension {
        Extension(State::new(config).await)
    }

    async fn daily_image_prune(&self) {
        let offset = self.config.daily_offset_hours as u128 * ONE_HOUR_MS;
        loop {
            wait_until_timelength(Timelength::OneDay, offset).await;
            let servers = self
                .db
                .servers
                .get_some(doc! { "enabled": true, "auto_prune": true }, None)
                .await;
            if let Err(e) = &servers {
                eprintln!(
                    "{} | failed to get servers for daily prune | {e:#?}",
                    unix_timestamp_ms()
                );
                continue;
            }
            let futures = servers.unwrap().into_iter().map(|server| async move {
                let _ = self.periphery.image_prune(&server).await;
            });
            join_all(futures).await;
        }
    }
}
@@ -1,71 +0,0 @@
use axum::{
    extract::ws::{Message, WebSocket},
    routing::get,
    Router,
};

use crate::{
    auth::{JwtClient, RequestUser},
    state::State,
};

mod stats;
pub mod update;

pub fn router() -> Router {
    Router::new()
        .route("/update", get(update::ws_handler))
        .route("/stats/:id", get(stats::ws_handler))
}

impl State {
    pub async fn ws_login(
        &self,
        mut socket: WebSocket,
        jwt_client: &JwtClient,
    ) -> Option<(WebSocket, RequestUser)> {
        if let Some(jwt) = socket.recv().await {
            match jwt {
                Ok(jwt) => match jwt {
                    Message::Text(jwt) => {
                        match jwt_client.auth_jwt_check_enabled(&jwt, self).await {
                            Ok(user) => {
                                let _ = socket.send(Message::Text("LOGGED_IN".to_string())).await;
                                Some((socket, user))
                            }
                            Err(e) => {
                                let _ = socket
                                    .send(Message::Text(format!(
                                        "failed to authenticate user | {e:#?}"
                                    )))
                                    .await;
                                let _ = socket.close().await;
                                None
                            }
                        }
                    }
                    msg => {
                        let _ = socket
                            .send(Message::Text(format!("invalid login msg: {msg:#?}")))
                            .await;
                        let _ = socket.close().await;
                        None
                    }
                },
                Err(e) => {
                    let _ = socket
                        .send(Message::Text(format!("failed to get jwt message: {e:#?}")))
                        .await;
                    let _ = socket.close().await;
                    None
                }
            }
        } else {
            let _ = socket
                .send(Message::Text(format!("failed to get jwt message")))
                .await;
            let _ = socket.close().await;
            None
        }
    }
}
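A sketch of a client for the login handshake implemented by ws_login above, assuming a tokio-tungstenite client; the address and jwt are placeholders, not values from the original source:

```rust
// hypothetical client for the ws login handshake above
use futures_util::{SinkExt, StreamExt};
use tokio_tungstenite::{connect_async, tungstenite::Message};

async fn connect_update_ws(jwt: &str) -> anyhow::Result<()> {
    // placeholder address; point this at monitor core's /ws/update route
    let (mut socket, _) = connect_async("ws://localhost:9000/ws/update").await?;
    // the server expects the jwt as the first text message
    socket.send(Message::Text(jwt.to_string())).await?;
    match socket.next().await {
        Some(Ok(Message::Text(msg))) if msg == "LOGGED_IN" => {
            // authenticated; subsequent text messages carry the payloads
            while let Some(Ok(Message::Text(update))) = socket.next().await {
                println!("{update}");
            }
            Ok(())
        }
        other => Err(anyhow::anyhow!("ws login failed: {other:?}")),
    }
}
```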
@@ -1,97 +0,0 @@
use std::sync::Arc;

use axum::{
    extract::{ws::Message as AxumMessage, Path, Query, WebSocketUpgrade},
    response::IntoResponse,
};
use futures_util::{SinkExt, StreamExt};
use helpers::handle_anyhow_error;
use serde::Deserialize;
use tokio::select;
use tokio_tungstenite::tungstenite::Message as TungsteniteMessage;
use tokio_util::sync::CancellationToken;
use types::{traits::Permissioned, PermissionLevel, SystemStatsQuery};

use crate::{auth::JwtExtension, state::StateExtension, ResponseResult};

#[derive(Deserialize)]
pub struct ServerId {
    id: String,
}

pub async fn ws_handler(
    state: StateExtension,
    jwt_client: JwtExtension,
    path: Path<ServerId>,
    query: Query<SystemStatsQuery>,
    ws: WebSocketUpgrade,
) -> ResponseResult<impl IntoResponse> {
    let server = state
        .db
        .get_server(&path.id)
        .await
        .map_err(handle_anyhow_error)?;
    let query = Arc::new(query);
    let upgrade = ws.on_upgrade(|socket| async move {
        let login_res = state.ws_login(socket, &jwt_client).await;
        if login_res.is_none() {
            return;
        }
        let (mut socket, user) = login_res.unwrap();
        if !user.is_admin && server.get_user_permissions(&user.id) < PermissionLevel::Read {
            let _ = socket
                .send(AxumMessage::Text(
                    "permission denied. user must have at least read permissions on this server"
                        .to_string(),
                ))
                .await;
            return;
        }
        let (mut ws_sender, mut ws_reciever) = socket.split();
        let res = state.periphery.subscribe_to_stats_ws(&server, &query).await;
        if let Err(e) = &res {
            let _ = ws_sender
                .send(AxumMessage::Text(format!("ERROR: {e}")))
                .await;
            return;
        }
        let mut stats_recv = res.unwrap();
        let cancel = CancellationToken::new();
        let cancel_clone = cancel.clone();
        // forward stats from the periphery ws to the client until cancelled
        tokio::spawn(async move {
            loop {
                let stats = select! {
                    _ = cancel_clone.cancelled() => {
                        let _ = stats_recv.close(None).await;
                        break
                    },
                    stats = stats_recv.next() => stats,
                };
                if let Some(Ok(TungsteniteMessage::Text(msg))) = stats {
                    let _ = ws_sender.send(AxumMessage::Text(msg)).await;
                } else {
                    let _ = stats_recv.close(None).await;
                    break;
                }
            }
        });
        // cancel the forwarding task when the client closes or errors
        while let Some(msg) = ws_reciever.next().await {
            match msg {
                Ok(msg) => match msg {
                    AxumMessage::Close(_) => {
                        cancel.cancel();
                        return;
                    }
                    _ => {}
                },
                Err(_) => {
                    cancel.cancel();
                    return;
                }
            }
        }
    });
    Ok(upgrade)
}
@@ -1,158 +0,0 @@
use anyhow::anyhow;
use axum::{
    extract::{ws::Message, WebSocketUpgrade},
    response::IntoResponse,
};
use db::DbClient;
use futures_util::{SinkExt, StreamExt};
use serde_json::json;
use tokio::{
    select,
    sync::{
        broadcast::{self, Receiver, Sender},
        Mutex,
    },
};
use tokio_util::sync::CancellationToken;
use types::{PermissionLevel, Update, UpdateTarget, User};

use crate::{auth::JwtExtension, state::StateExtension};

pub type UpdateWsSender = Mutex<Sender<Update>>;

pub type UpdateWsReciever = Receiver<Update>;

pub struct UpdateWsChannel {
    pub sender: UpdateWsSender,
    pub reciever: UpdateWsReciever,
}

impl UpdateWsChannel {
    pub fn new() -> UpdateWsChannel {
        let (sender, reciever) = broadcast::channel(16);
        UpdateWsChannel {
            sender: Mutex::new(sender),
            reciever,
        }
    }
}

pub async fn ws_handler(
    jwt_client: JwtExtension,
    state: StateExtension,
    ws: WebSocketUpgrade,
) -> impl IntoResponse {
    let mut reciever = state.update.reciever.resubscribe();
    ws.on_upgrade(|socket| async move {
        let login_res = state.ws_login(socket, &jwt_client).await;
        if login_res.is_none() {
            return;
        }
        let (socket, user) = login_res.unwrap();
        let (mut ws_sender, mut ws_reciever) = socket.split();
        let cancel = CancellationToken::new();
        let cancel_clone = cancel.clone();
        tokio::spawn(async move {
            loop {
                let update = select! {
                    _ = cancel_clone.cancelled() => break,
                    update = reciever.recv() => { update.expect("failed to recv update msg") }
                };
                let user = state.db.users.find_one_by_id(&user.id).await;
                if user.is_err()
                    || user.as_ref().unwrap().is_none()
                    || !user.as_ref().unwrap().as_ref().unwrap().enabled
                {
                    let _ = ws_sender
                        .send(Message::Text(json!({ "type": "INVALID_USER" }).to_string()))
                        .await;
                    let _ = ws_sender.close().await;
                    return;
                }
                let user = user.unwrap().unwrap(); // cases where this would panic are handled by the early return above
                match user_can_see_update(&user, &user.id, &update.target, &state.db).await {
                    Ok(_) => {
                        let _ = ws_sender
                            .send(Message::Text(serde_json::to_string(&update).unwrap()))
                            .await;
                    }
                    Err(_) => {
                        // make these errors visible in some way
                    }
                }
            }
        });

        while let Some(msg) = ws_reciever.next().await {
            match msg {
                Ok(msg) => match msg {
                    Message::Close(_) => {
                        cancel.cancel();
                        return;
                    }
                    _ => {}
                },
                Err(_) => {
                    cancel.cancel();
                    return;
                }
            }
        }
    })
}

async fn user_can_see_update(
    user: &User,
    user_id: &str,
    update_target: &UpdateTarget,
    db_client: &DbClient,
) -> anyhow::Result<()> {
    if user.admin {
        return Ok(());
    }
    let (permissions, target) = match update_target {
        UpdateTarget::Server(server_id) => {
            let permissions = db_client
                .get_user_permission_on_server(user_id, server_id)
                .await?;
            (permissions, "server")
        }
        UpdateTarget::Deployment(deployment_id) => {
            let permissions = db_client
                .get_user_permission_on_deployment(user_id, deployment_id)
                .await?;
            (permissions, "deployment")
        }
        UpdateTarget::Build(build_id) => {
            let permissions = db_client
                .get_user_permission_on_build(user_id, build_id)
                .await?;
            (permissions, "build")
        }
        UpdateTarget::Procedure(procedure_id) => {
            let permissions = db_client
                .get_user_permission_on_procedure(user_id, procedure_id)
                .await?;
            (permissions, "procedure")
        }
        UpdateTarget::Group(group_id) => {
            let permissions = db_client
                .get_user_permission_on_group(user_id, group_id)
                .await?;
            (permissions, "group")
        }
        UpdateTarget::Command(command_id) => {
            let permissions = db_client
                .get_user_permission_on_command(user_id, command_id)
                .await?;
            (permissions, "command")
        }
        UpdateTarget::System => {
            return Err(anyhow!("user not admin, can't receive system updates"))
        }
    };
    if permissions != PermissionLevel::None {
        Ok(())
    } else {
        Err(anyhow!("user does not have permissions on {target}"))
    }
}
20
docsite/.gitignore
vendored
@@ -1,20 +0,0 @@
# Dependencies
/node_modules

# Production
/build

# Generated files
.docusaurus
.cache-loader

# Misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local

npm-debug.log*
yarn-debug.log*
yarn-error.log*
@@ -1,41 +0,0 @@
# Website

This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.

### Installation

```
$ yarn
```

### Local Development

```
$ yarn start
```

This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.

### Build

```
$ yarn build
```

This command generates static content into the `build` directory and can be served using any static contents hosting service.

### Deployment

Using SSH:

```
$ USE_SSH=true yarn deploy
```

Not using SSH:

```
$ GIT_USER=<Your GitHub username> yarn deploy
```

If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.
@@ -1,3 +0,0 @@
module.exports = {
  presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
};
@@ -1,42 +0,0 @@
import Divider from '@site/src/components/Divider';

# api secrets

these routes are used to manage api secrets.

| name | route |
| ---- | ------ |
| [create api secret](/api/api-secrets#create-api-secret) | `POST /api/secret/create` |
| [delete api secret](/api/api-secrets#delete-api-secret) | `DELETE /api/secret/delete/<secret-name>` |

```mdx-code-block
<Divider />
```

## create api secret
`POST /api/secret/create`

### request body
```json
{
  name: string, // name the secret. must be unique among the user's secrets
  expires?: rfc3339_timestamp, // optional expiry time. if none, the secret will not expire.
}
```

### response body
```json
string // the body will be the secret hash used to log in.
```

```mdx-code-block
<Divider />
```

## delete api secret
`DELETE /api/secret/delete/<secret-name>`

### response
```json
HTTP 200 OK
```
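as an illustration, a sketch of the create/delete flow using the rust reqwest crate (an assumption, used here with its `json` feature; the core url and jwt are placeholders):

```rust
// hypothetical sketch: create then delete an api secret via the routes above
async fn manage_secret(jwt: &str) -> anyhow::Result<()> {
    let client = reqwest::Client::new();
    // create: the response body is the secret used to log in
    let secret = client
        .post("http://localhost:9000/api/secret/create") // placeholder url
        .bearer_auth(jwt)
        .json(&serde_json::json!({ "name": "ci-secret" }))
        .send()
        .await?
        .text()
        .await?;
    println!("store this somewhere safe: {secret}");
    // delete the secret by name
    client
        .delete("http://localhost:9000/api/secret/delete/ci-secret")
        .bearer_auth(jwt)
        .send()
        .await?;
    Ok(())
}
```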
@@ -1,8 +0,0 @@
# authenticating requests

monitor uses the `JSON Web Token (JWT)` standard to authenticate all requests to subroutes under `/api`.
users can acquire a `JWT` using a [login method](/api/login).

to authenticate requests, pass the `JWT` under the `Authorization` header:

`Authorization: Bearer <JWT>`
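for example, a sketch using the rust reqwest crate (an assumption, not part of monitor itself; the url is a placeholder):

```rust
// hypothetical sketch: authenticating an api request with a jwt
async fn list_builds(jwt: &str) -> anyhow::Result<String> {
    let body = reqwest::Client::new()
        .get("http://localhost:9000/api/build/list") // placeholder url
        .bearer_auth(jwt) // sets `Authorization: Bearer <JWT>`
        .send()
        .await?
        .text()
        .await?;
    Ok(body)
}
```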
@@ -1,224 +0,0 @@
import Divider from '@site/src/components/Divider';

# build

these routes relate to interacting with monitor `builds`

| name | route |
| ---- | ------ |
| [list builds](/api/build#list-builds) | `GET /api/build/list` |
| [get build](/api/build#get-build) | `GET /api/build/<build_id>` |
| [get build action state](/api/build#get-build-action-state) | `GET /api/build/<build_id>/action_state` |
| [get build versions](/api/build#get-build-versions) | `GET /api/build/<build_id>/versions` |
| [create build](/api/build#create-build) | `POST /api/build/create` |
| [create full build](/api/build#create-full-build) | `POST /api/build/create_full` |
| [copy build](/api/build#copy-build) | `POST /api/build/<build_id>/copy` |
| [delete build](/api/build#delete-build) | `DELETE /api/build/<build_id>/delete` |
| [update build](/api/build#update-build) | `PATCH /api/build/update` |
| [build](/api/build#build-action) | `POST /api/build/<build_id>/build` |
| [get aws builder defaults](/api/build#get-aws-builder-defaults) | `GET /api/build/aws_builder_defaults` |
| [get allowed docker organizations](/api/build#get-allowed-docker-organizations) | `GET /api/build/docker_organizations` |

```mdx-code-block
<Divider />
```

## list builds
`GET /api/build/list`

this method will return an array of builds the requesting user has a minimum of `Read` permissions on.

### response body
Array<[Build](/api/types#build)>

```mdx-code-block
<Divider />
```

## get build
`GET /api/build/<build_id>`

### response body
[Build](/api/types#build)

```mdx-code-block
<Divider />
```

## get build action state
`GET /api/build/<build_id>/action_state`

this method returns the action state for the build, e.g. whether the build is currently `building`.

### response body
```json
{
  building: boolean,
  updating: boolean,
}
```

```mdx-code-block
<Divider />
```

## get build versions
`GET /api/build/<build_id>/versions`

paginated route for fetching the most recent available versions of this build. an example request follows the response body below.

### query params
```json
page=number // optional, default is 0. pagination starting at page 0.
major=number // optional. filter by major version number
minor=number // optional. filter by minor version number
patch=number // optional. filter by patch version number
```

### response body
```json
[
  {
    ts: rfc3339_timestamp,
    version: {
      major: number,
      minor: number,
      patch: number,
    }
  },
  ...
]
```
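as a sketch, fetching a filtered page of versions with the rust reqwest crate (an assumption; the host is a placeholder):

```rust
// hypothetical sketch: fetch page 1 of versions, filtered to major version 0
async fn get_versions(jwt: &str, build_id: &str) -> anyhow::Result<String> {
    // placeholder host; query params match the doc above
    let url = format!("http://localhost:9000/api/build/{build_id}/versions?page=1&major=0");
    let body = reqwest::Client::new()
        .get(url)
        .bearer_auth(jwt)
        .send()
        .await?
        .text()
        .await?;
    Ok(body)
}
```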
```mdx-code-block
<Divider />
```

## create build
`POST /api/build/create`

### request body
```json
{
  name: string,
}
```

### response body
[Build](/api/types#build)

```mdx-code-block
<Divider />
```

## create full build
`POST /api/build/create_full`

### request body
[Build](/api/types#build)

### response body
[Build](/api/types#build)

```mdx-code-block
<Divider />
```

## copy build
`POST /api/build/<build_id>/copy`

this method will create a copy of the build with a new _id and name,
with all the same configuration as the target build.

### request body
```json
{
  name: string, // the new name
}
```

### response body
[Build](/api/types#build)

```mdx-code-block
<Divider />
```

## delete build
`DELETE /api/build/<build_id>/delete`

### response body
[Build](/api/types#build)

```mdx-code-block
<Divider />
```

## update build
`PATCH /api/build/update`

### request body
[Build](/api/types#build)

### response body
[Build](/api/types#build)

```mdx-code-block
<Divider />
```

## build (action)
`POST /api/build/<build_id>/build`

### response body
[Update](/api/types#update)

:::note
this update will include the `version` field.
:::

```mdx-code-block
<Divider />
```

## get aws builder defaults
`GET /api/build/aws_builder_defaults`

### response body
```json
{
  default_ami_name: string,
  default_subnet_id: string,
  default_key_pair_name: string,
  default_region: string,
  default_volume_gb: number,
  default_instance_type: string,
  default_security_group_ids: string[],
  default_assign_public_ip: boolean,
  available_ami_accounts: [
    {
      ami_id: string,
      github: string[],
      docker: string[],
      secrets: string[],
    }
  ],
}
```

```mdx-code-block
<Divider />
```

## get allowed docker organizations
`GET /api/build/docker_organizations`

### response body
```json
string[] // the names of the allowed docker organizations
```
@@ -1,344 +0,0 @@
import Divider from '@site/src/components/Divider';

# deployment

these routes relate to interacting with monitor `deployments`

| name | route |
| ---- | ------ |
| [list deployments](/api/deployment#list-deployments) | `GET /api/deployment/list` |
| [get deployment](/api/deployment#get-deployment) | `GET /api/deployment/<deployment_id>` |
| [get deployment action state](/api/deployment#get-deployment-action-state) | `GET /api/deployment/<deployment_id>/action_state` |
| [get deployment container log](/api/deployment#get-deployment-container-log) | `GET /api/deployment/<deployment_id>/log` |
| [get deployment container stats](/api/deployment#get-deployment-container-stats) | `GET /api/deployment/<deployment_id>/stats` |
| [get deployment deployed version](/api/deployment#get-deployment-deployed-version) | `GET /api/deployment/<deployment_id>/deployed_version` |
| [create deployment](/api/deployment#create-deployment) | `POST /api/deployment/create` |
| [create full deployment](/api/deployment#create-full-deployment) | `POST /api/deployment/create_full` |
| [copy deployment](/api/deployment#copy-deployment) | `POST /api/deployment/<deployment_id>/copy` |
| [delete deployment](/api/deployment#delete-deployment) | `DELETE /api/deployment/<deployment_id>/delete` |
| [update deployment](/api/deployment#update-deployment) | `PATCH /api/deployment/update` |
| [rename deployment](/api/deployment#rename-deployment) | `PATCH /api/deployment/<deployment_id>/rename` |
| [reclone deployment](/api/deployment#reclone-deployment) | `POST /api/deployment/<deployment_id>/reclone` |
| [pull deployment](/api/deployment#pull-deployment) | `POST /api/deployment/<deployment_id>/pull` |
| [deploy container](/api/deployment#deploy-container) | `POST /api/deployment/<deployment_id>/deploy` |
| [start container](/api/deployment#start-container) | `POST /api/deployment/<deployment_id>/start_container` |
| [stop container](/api/deployment#stop-container) | `POST /api/deployment/<deployment_id>/stop_container` |
| [remove container](/api/deployment#remove-container) | `POST /api/deployment/<deployment_id>/remove_container` |

```mdx-code-block
<Divider />
```

## list deployments
`GET /api/deployment/list`

this method will return an array of deployments with container state that the requesting user has a minimum of `Read` permissions on.

### response body
```json
[
  {
    deployment: Deployment,
    state: DockerContainerState,
    container?: {
      name: string,
      id: string,
      image: string,
      state: DockerContainerState,
      status?: string,
    }
  },
  ...
]
```

```mdx-code-block
<Divider />
```

## get deployment
`GET /api/deployment/<deployment_id>`

this method will return the deployment along with its container state, provided
the requesting user has a minimum of `Read` permissions on it.
it will return `500: Internal Server Error` if the user does not have the required permissions.

### response body
```json
{
  deployment: Deployment,
  state: DockerContainerState,
  container?: {
    name: string,
    id: string,
    image: string,
    state: DockerContainerState,
    status?: string,
  }
}
```

```mdx-code-block
<Divider />
```
## get deployment action state
`GET /api/deployment/<deployment_id>/action_state`

this method returns the action state for the deployment, e.g. whether the deployment is currently `deploying`.

### response body
```json
{
  deploying: boolean,
  stopping: boolean,
  starting: boolean,
  removing: boolean,
  pulling: boolean,
  recloning: boolean,
  updating: boolean,
  renaming: boolean,
}
```

```mdx-code-block
<Divider />
```

## get deployment container log
`GET /api/deployment/<deployment_id>/log`

this method is used to get the log of the container associated with the deployment. an example request follows the response body below.

### query params
```json
{
  tail: number // number of log lines to fetch. this is passed to the --tail flag of docker logs command
}
```

### response body
```json
{
  stdout: string,
  stderr: string,
}
```
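as a sketch, fetching the last 50 log lines with the rust reqwest crate (an assumption; the host is a placeholder):

```rust
// hypothetical sketch: fetch the tail of a deployment's container log
async fn get_log(jwt: &str, deployment_id: &str) -> anyhow::Result<String> {
    // placeholder host; `tail` maps to `docker logs --tail`
    let url = format!("http://localhost:9000/api/deployment/{deployment_id}/log?tail=50");
    let body = reqwest::Client::new()
        .get(url)
        .bearer_auth(jwt)
        .send()
        .await?
        .text()
        .await?;
    Ok(body)
}
```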
```mdx-code-block
<Divider />
```

## get deployment container stats
`GET /api/deployment/<deployment_id>/stats`

this method returns the results of running `docker stats <container_name>`
for the container associated with the deployment.

### response body
```json
{
  name: string,
  cpu_perc: string,
  mem_perc: string,
  mem_usage: string,
  net_io: string,
  block_io: string,
  pids: string,
}
```

```mdx-code-block
<Divider />
```

## get deployment deployed version
`GET /api/deployment/<deployment_id>/deployed_version`

this method is used to get the image version of the container associated with the deployment, if it exists.
otherwise, it will return the version specified in the deployment config.

### response body
```json
string // the deployed version like '0.2.4'
```

```mdx-code-block
<Divider />
```

## create deployment
`POST /api/deployment/create`

this method is used to create a new deployment on a particular server.
it will return the created deployment.

:::note
users must be **admin** or have `update` permissions on the server specified by the `server_id`
in the request body in order for this request to succeed.
:::

### request body
```json
{
  name: string,
  server_id: string,
}
```

### response body
[Deployment](/api/types#deployment)

```mdx-code-block
<Divider />
```

## create full deployment
`POST /api/deployment/create_full`

this method is used to create a new deployment on a particular server, already initialized with config.
it will return the created deployment.

### request body
[Deployment](/api/types#deployment)

### response body
[Deployment](/api/types#deployment)

```mdx-code-block
<Divider />
```

## copy deployment
`POST /api/deployment/<deployment_id>/copy`

this method will create a copy of the deployment with a new _id and name,
with all the same configuration as the target deployment.
it can be used to move the deployment to another server.

### request body
```json
{
  name: string,
  server_id: string,
}
```

### response body
[Deployment](/api/types#deployment)

```mdx-code-block
<Divider />
```

## delete deployment
`DELETE /api/deployment/<deployment_id>/delete`

this method will delete the deployment. if a container is associated with the deployment, it will be destroyed.

### response body
[Deployment](/api/types#deployment)

```mdx-code-block
<Divider />
```

## update deployment
`PATCH /api/deployment/update`

### request body
[Deployment](/api/types#deployment)

### response body
[Deployment](/api/types#deployment)

```mdx-code-block
<Divider />
```

## rename deployment
`PATCH /api/deployment/<deployment_id>/rename`

### request body
```json
{
  new_name: string,
}
```

```mdx-code-block
<Divider />
```

## reclone deployment
`POST /api/deployment/<deployment_id>/reclone`

if the deployment has a repo attached, this will reclone the repo,
including the on-clone and on-pull actions.

### response body
[Update](/api/types#update)

```mdx-code-block
<Divider />
```

## pull deployment
`POST /api/deployment/<deployment_id>/pull`

if the deployment has a repo attached, this will `git pull` in the repo,
including the on-pull action.

### response body
[Update](/api/types#update)

```mdx-code-block
<Divider />
```

## deploy container
`POST /api/deployment/<deployment_id>/deploy`

this will deploy the container corresponding to the deployment's configuration.
if the container already exists, it will destroy it first.

### response body
[Update](/api/types#update)

```mdx-code-block
<Divider />
```

## start container
`POST /api/deployment/<deployment_id>/start_container`

this will run `docker start <container_name>` for the container
corresponding to the deployment.

### response body
[Update](/api/types#update)

```mdx-code-block
<Divider />
```

## stop container
`POST /api/deployment/<deployment_id>/stop_container`

this will run `docker stop <container_name>` for the container
corresponding to the deployment.

### response body
[Update](/api/types#update)

```mdx-code-block
<Divider />
```

## remove container
`POST /api/deployment/<deployment_id>/remove_container`

this will run `docker stop <container_name> && docker container rm <container_name>`
for the container corresponding to the deployment.

### response body
[Update](/api/types#update)
@@ -1,11 +0,0 @@
---
slug: /api
---

this section documents the REST and websocket api.

```mdx-code-block
import DocCardList from '@theme/DocCardList';

<DocCardList />
```
@@ -1,103 +0,0 @@
import Divider from '@site/src/components/Divider';

# login

monitor supports local login (username and password), OAuth2 login (github and google),
and secret login (username and API secret key).
each method must be explicitly enabled in your monitor core config,
otherwise the api won't be available.

:::note
in order to log in to an OAuth2 user's account programmatically,
you must [create an api secret](/api/api-secrets#create-api-secret) and log in using [/auth/secret/login](/api/login#login-using-api-secret)
:::

| name | route |
| ---- | ------ |
| [get login options](/api/login#get-login-options) | `GET /auth/options` |
| [create local user account](/api/login#create-local-user-account) | `POST /auth/local/create_user` |
| [login local user account](/api/login#login-local-user-account) | `POST /auth/local/login` |
| [login using api secret](/api/login#login-using-api-secret) | `POST /auth/secret/login` |

```mdx-code-block
<Divider />
```

## get login options
`GET /auth/options`

this method is used to obtain the login options enabled on monitor core.

### response body
```json
{
  local: boolean,
  github: boolean,
  google: boolean,
}
```

```mdx-code-block
<Divider />
```

## create local user account
`POST /auth/local/create_user`

this method will create a new local auth account with the provided **username** and **password**,
and return a `JWT` for the user to authenticate with.

### request body
```json
{
  username: string,
  password: string,
}
```

### response body
`<JWT token as string>`

:::caution
a user created with this method is, by default, `disabled`. a monitor admin must enable their account before they can access the API.
:::

```mdx-code-block
<Divider />
```

## login local user account
`POST /auth/local/login`

this method will authenticate a local user's credentials and return a JWT if login is successful.

### request body
```json
{
  username: string,
  password: string,
}
```

### response body
`<JWT token as string>`

```mdx-code-block
<Divider />
```

## login using api secret
`POST /auth/secret/login`

this method will authenticate a user's account of any kind using an api secret generated using [/api/secret/create](/api/api-secrets#create-api-secret).

### request body
```json
{
  username: string,
  secret: string,
}
```

### response body
`<JWT token as string>`
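the secret login flow composes naturally with the rest of the api: exchange the username / secret pair for a JWT, then attach that JWT to subsequent requests. a minimal sketch, assuming a core at `https://monitor.example.com` (a placeholder) and that the JWT is later sent as a bearer token (see the authenticating requests page for the exact header your deployment expects):

```typescript
// sketch: exchange an api secret for a JWT.
const MONITOR_URL = "https://monitor.example.com"; // placeholder

async function loginWithSecret(username: string, secret: string): Promise<string> {
  const res = await fetch(`${MONITOR_URL}/auth/secret/login`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ username, secret }),
  });
  if (!res.ok) throw new Error(`login failed: ${res.status}`);
  // the body is the JWT as a string (it may be JSON-quoted depending on serialization)
  return res.text();
}

// the returned JWT is then attached to subsequent api requests,
// eg. in an Authorization header.
```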
@@ -1,90 +0,0 @@
import Divider from '@site/src/components/Divider';

# permissions

these routes relate to updating user permissions.

:::note
these routes can only be called by **admin** users
:::

| name | route |
| ---- | ------ |
| [update user permissions on target](/api/permissions#update-user-permissions-on-target) | `POST /api/permissions/update` |
| [modify user enabled](/api/permissions#modify-user-enabled) | `POST /api/permissions/modify_enabled` |
| [modify user create server permissions](/api/permissions#modify-user-create-server-permissions) | `POST /api/permissions/modify_create_server` |
| [modify user create build permissions](/api/permissions#modify-user-create-build-permissions) | `POST /api/permissions/modify_create_build` |

```mdx-code-block
<Divider />
```

## update user permissions on target
`POST /api/permissions/update`

### request body
```json
{
  user_id: string, // the target user's id
  permission: "none" | "read" | "execute" | "update",
  target_type: "server" | "deployment" | "build" | "procedure" | "group",
  target_id: string, // the target resource's id
}
```

### response body
[Update](/api/types#update)

```mdx-code-block
<Divider />
```
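a minimal sketch of granting a user `execute` permission on a deployment, under the same assumptions as the other examples (placeholder base url, JWT passed as a bearer token, and a caller that is an admin user):

```typescript
// sketch: grant a user execute permission on a deployment.
const MONITOR_URL = "https://monitor.example.com"; // placeholder
const ADMIN_JWT = process.env.MONITOR_JWT!; // must belong to an admin user

async function grantExecute(userId: string, deploymentId: string) {
  const res = await fetch(`${MONITOR_URL}/api/permissions/update`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${ADMIN_JWT}`,
    },
    body: JSON.stringify({
      user_id: userId,
      permission: "execute",
      target_type: "deployment",
      target_id: deploymentId,
    }),
  });
  if (!res.ok) throw new Error(`permission update failed: ${res.status}`);
  return res.json(); // an Update (see /api/types#update)
}
```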
## modify user enabled
`POST /api/permissions/modify_enabled`

### request body
```json
{
  user_id: string, // the target user's id
  enabled: boolean,
}
```

### response body
[Update](/api/types#update)

```mdx-code-block
<Divider />
```

## modify user create server permissions
`POST /api/permissions/modify_create_server`

### request body
```json
{
  user_id: string, // the target user's id
  create_server_permissions: boolean,
}
```

### response body
[Update](/api/types#update)

```mdx-code-block
<Divider />
```

## modify user create build permissions
`POST /api/permissions/modify_create_build`

### request body
```json
{
  user_id: string, // the target user's id
  create_build_permissions: boolean,
}
```

### response body
[Update](/api/types#update)
@@ -1,10 +0,0 @@
import Divider from '@site/src/components/Divider';

# procedure

these routes relate to interacting with monitor `procedures`.

```mdx-code-block
<Divider />
```
@@ -1,473 +0,0 @@
import Divider from '@site/src/components/Divider';

# server

these routes relate to interacting with monitor `servers`.

| name | route |
| ---- | ------ |
| [list servers](/api/server#list-servers) | `GET /api/server/list` |
| [get server](/api/server#get-server) | `GET /api/server/<server_id>` |
| [get server action state](/api/server#get-server-action-state) | `GET /api/server/<server_id>/action_state` |
| [get server github accounts](/api/server#get-server-github-accounts) | `GET /api/server/<server_id>/github_accounts` |
| [get server docker accounts](/api/server#get-server-docker-accounts) | `GET /api/server/<server_id>/docker_accounts` |
| [get server available secrets](/api/server#get-server-available-secrets) | `GET /api/server/<server_id>/secrets` |
| [create server](/api/server#create-server) | `POST /api/server/create` |
| [create full server](/api/server#create-full-server) | `POST /api/server/create_full` |
| [delete server](/api/server#delete-server) | `DELETE /api/server/<server_id>/delete` |
| [update server](/api/server#update-server) | `PATCH /api/server/update` |
| [get server periphery version](/api/server#get-server-periphery-version) | `GET /api/server/<server_id>/version` |
| [get server system information](/api/server#get-server-system-information) | `GET /api/server/<server_id>/system_information` |
| [get server stats](/api/server#get-server-stats) | `GET /api/server/<server_id>/stats` |
| [get server stats history](/api/server#get-server-stats-history) | `GET /api/server/<server_id>/stats/history` |
| [get server stats at time](/api/server#get-server-stats-at-time) | `GET /api/server/<server_id>/stats/at_ts` |
| [get docker networks](/api/server#get-docker-networks) | `GET /api/server/<server_id>/networks` |
| [prune docker networks](/api/server#prune-docker-networks) | `POST /api/server/<server_id>/networks/prune` |
| [get docker images](/api/server#get-docker-images) | `GET /api/server/<server_id>/images` |
| [prune docker images](/api/server#prune-docker-images) | `POST /api/server/<server_id>/images/prune` |
| [get docker containers](/api/server#get-docker-containers) | `GET /api/server/<server_id>/containers` |
| [prune docker containers](/api/server#prune-docker-containers) | `POST /api/server/<server_id>/containers/prune` |

```mdx-code-block
<Divider />
```

## list servers
`GET /api/server/list`

this method will return an array of the servers (with their status)
that the requesting user has a minimum of `Read` permissions on.

### response body
```json
[
  {
    server: Server,
    status: ServerStatus
  },
  ...
]
```

```mdx-code-block
<Divider />
```
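as with the other routes, a JWT from one of the login methods authorizes the request. a minimal sketch, typed loosely against the [Server](/api/types#server) shape and using the same placeholder base url / bearer-token assumptions as the other examples:

```typescript
// sketch: list the servers visible to the authenticated user.
const MONITOR_URL = "https://monitor.example.com"; // placeholder
const JWT = process.env.MONITOR_JWT!;

interface ServerListItem {
  server: { _id?: string; name: string; address: string };
  status: string; // ServerStatus
}

async function listServers(): Promise<ServerListItem[]> {
  const res = await fetch(`${MONITOR_URL}/api/server/list`, {
    headers: { Authorization: `Bearer ${JWT}` },
  });
  if (!res.ok) throw new Error(`list servers failed: ${res.status}`);
  return res.json();
}

listServers().then((servers) =>
  servers.forEach(({ server, status }) => console.log(server.name, status))
);
```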
## get server
`GET /api/server/<server_id>`

this method will return the server, along with its status,
provided the requesting user has a minimum of `Read` permissions on it.
it will return `500: Internal Server Error` if the user does not have the required permissions.

### response body
```json
{
  server: Server,
  status: ServerStatus
}
```

```mdx-code-block
<Divider />
```

## get server action state
`GET /api/server/<server_id>/action_state`

this method returns the action state for the server, eg. whether the server is currently `pruning_images`.

### response body
```json
{
  pruning_networks: boolean,
  pruning_containers: boolean,
  pruning_images: boolean,
}
```

```mdx-code-block
<Divider />
```

## get server github accounts
`GET /api/server/<server_id>/github_accounts`

this method returns a list of all the github account usernames that are available on the server,
as defined in the server's periphery config under `[github_accounts]`.

### response body
```json
["<github_username_1>", "<github_username_2>", ...]
```

```mdx-code-block
<Divider />
```

## get server docker accounts
`GET /api/server/<server_id>/docker_accounts`

this method returns a list of all the docker account usernames that are available on the server,
as defined in the server's periphery config under `[docker_accounts]`.

### response body
```json
["<docker_username_1>", "<docker_username_2>", ...]
```

```mdx-code-block
<Divider />
```

## get server available secrets
`GET /api/server/<server_id>/secrets`

this method returns a list of all the secret keys that are available on the server,
as defined in the server's periphery config under `[secrets]`.

### response body
```json
["<secret_key_1>", "<secret_key_2>", ...]
```

```mdx-code-block
<Divider />
```

## create server
`POST /api/server/create`

### request body
```json
{
  name: string,
  address: string, // eg. http://12.34.56.78:8000
}
```

### response body
[Server](/api/types#server)

```mdx-code-block
<Divider />
```

## create full server
`POST /api/server/create_full`

this method is used to create a new server, already initialized with config.
it will return the created server.

### request body
[Server](/api/types#server)

### response body
[Server](/api/types#server)

```mdx-code-block
<Divider />
```

## delete server
`DELETE /api/server/<server_id>/delete`

this method will delete the server, along with all deployments attached to the server.

### response body
[Server](/api/types#server)

```mdx-code-block
<Divider />
```

## update server
`PATCH /api/server/update`

this method is used to update a server's configuration.

### request body
[Server](/api/types#server)

### response body
[Server](/api/types#server)

```mdx-code-block
<Divider />
```

## get server periphery version
`GET /api/server/<server_id>/version`

this method is used to get the version of the periphery binary running on the server.

### response body
```json
string // the periphery version
```

```mdx-code-block
<Divider />
```

## get server system information
`GET /api/server/<server_id>/system_information`

this method gets some information about the host system running the periphery binary.

### response body
```json
{
  name?: string, // the name of the system
  os?: string, // the os the system is running
  kernel?: string, // the version of the kernel
  core_count?: number, // number of cores in the cpu
  host_name?: string, // host name of the system
  cpu_brand: string, // information on the cpu of the system
}
```

```mdx-code-block
<Divider />
```

## get server stats
`GET /api/server/<server_id>/stats`

this method retrieves the current system stats of the server.

### query params
```json
cpus=boolean // optional. if true, response will include information about each core individually
disks=boolean // optional. if true, response will include a breakdown of disk usage by mount point
networks=boolean // optional. if true, response will include info on network usage
components=boolean // optional. if true, response will include component temperatures
processes=boolean // optional. if true, response will include all system processes running on the host and their resource usage
```

### response body
```json
{
  system_load: number,
  cpu_perc: number,
  cpu_freq_mhz: number,
  mem_used_gb: number,
  mem_total_gb: number,
  disk: {},
  cpus: [],
  networks: [],
  components: [],
  processes: [],
  polling_rate: Timelength,
  refresh_ts: number,
  refresh_list_ts: number,
}
```

```mdx-code-block
<Divider />
```
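a minimal sketch of reading the current stats with the optional query params, under the same placeholder base url / bearer-token assumptions as the other examples:

```typescript
// sketch: fetch current stats including per-core and per-disk breakdowns.
const MONITOR_URL = "https://monitor.example.com"; // placeholder
const JWT = process.env.MONITOR_JWT!;

async function getServerStats(serverId: string) {
  const params = new URLSearchParams({ cpus: "true", disks: "true" });
  const res = await fetch(
    `${MONITOR_URL}/api/server/${serverId}/stats?${params}`,
    { headers: { Authorization: `Bearer ${JWT}` } }
  );
  if (!res.ok) throw new Error(`stats request failed: ${res.status}`);
  return res.json();
}

getServerStats("64a0c3...").then((stats) =>
  console.log(`cpu: ${stats.cpu_perc}%, mem: ${stats.mem_used_gb}/${stats.mem_total_gb} GB`)
);
```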
## get server stats history
`GET /api/server/<server_id>/stats/history`

this method will return historical system stats for the server.
the response is paginated; to get older data, specify a higher page number.

### query params
```json
interval=Timelength // optional, default interval is 1-hr. controls granularity of historical data
limit=number // optional, default is 100, max is 500. specifies the number of data points to fetch
page=number // optional, default is 0. specifies the page of data, going backward in time.
networks=boolean // optional. if true, response will include historical info on network usage
components=boolean // optional. if true, response will include historical component temperatures
```

### response body
```json
[
  {
    ts: number, // unix timestamp in ms
    server_id: string, // specifies the server
    system_load: number,
    cpu_perc: number,
    cpu_freq_mhz: number,
    mem_used_gb: number,
    mem_total_gb: number,
    disk: {},
    cpus: [],
    networks: [],
    components: [],
    processes: [],
    polling_rate: Timelength,
  },
  ...
]
```

```mdx-code-block
<Divider />
```

## get server stats at time
`GET /api/server/<server_id>/stats/at_ts`

this method retrieves the historical stats for a server at a specific timestamp.

### query params
```json
ts=number // required. the timestamp in ms
```

### response body
```json
{
  ts: number, // unix timestamp in ms
  server_id: string, // specifies the server
  system_load: number,
  cpu_perc: number,
  cpu_freq_mhz: number,
  mem_used_gb: number,
  mem_total_gb: number,
  disk: {},
  cpus: [],
  networks: [],
  components: [],
  processes: [],
  polling_rate: Timelength,
}
```

```mdx-code-block
<Divider />
```

## get docker networks
`GET /api/server/<server_id>/networks`

this method retrieves the docker networks on the server.

### response body
```json
[
  {
    Name?: string,
    Id?: string,
    Created?: string,
    Scope?: string,
    Driver?: string,
    EnableIPv6?: boolean,
    IPAM?: {
      Driver?: string,
      Config?: [
        {
          Subnet?: string,
          IPRange?: string,
          Gateway?: string,
          AuxiliaryAddresses?: {}
        },
        ...
      ],
      Options?: {}
    },
    Internal?: boolean,
    Attachable?: boolean,
    Ingress?: boolean,
    Containers?: {},
    Options?: {},
    Labels?: {}
  },
  ...
]
```

```mdx-code-block
<Divider />
```

## prune docker networks
`POST /api/server/<server_id>/networks/prune`

this method triggers the `network prune` action, which runs
`docker network prune -f` on the target server.

### response body
[Update](/api/types#update)

```mdx-code-block
<Divider />
```

## get docker images
`GET /api/server/<server_id>/images`

this method will return a list of images available locally on the server.

### response body
```json
[
  {
    Id: string,
    ParentId: string,
    RepoTags: [string],
    RepoDigests: [string],
    Created: number,
    Size: number,
    SharedSize: number,
    VirtualSize: number,
    Labels: {},
    Containers: number,
  }
]
```

```mdx-code-block
<Divider />
```

## prune docker images
`POST /api/server/<server_id>/images/prune`

this method triggers the `image prune` action, which runs
`docker image prune -a -f` on the target server.

### response body
[Update](/api/types#update)

```mdx-code-block
<Divider />
```
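the prune routes are fire-and-forget from the caller's perspective: they return an [Update](/api/types#update) whose logs record the outcome of the underlying docker command. a minimal sketch, same placeholder assumptions as above:

```typescript
// sketch: prune unused images on a server and print the command logs.
const MONITOR_URL = "https://monitor.example.com"; // placeholder
const JWT = process.env.MONITOR_JWT!;

async function pruneImages(serverId: string) {
  const res = await fetch(`${MONITOR_URL}/api/server/${serverId}/images/prune`, {
    method: "POST",
    headers: { Authorization: `Bearer ${JWT}` },
  });
  if (!res.ok) throw new Error(`prune failed: ${res.status}`);
  const update = await res.json(); // an Update (see /api/types#update)
  for (const log of update.logs) {
    console.log(log.stage, log.success ? "ok" : "failed", log.stdout);
  }
  return update;
}
```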
## get docker containers
`GET /api/server/<server_id>/containers`

this method is used to retrieve information about all the containers on the target server.

### response body
```json
[
  {
    name: string,
    id: string,
    image: string,
    state: DockerContainerState,
    status?: string,
  },
  ...
]
```

```mdx-code-block
<Divider />
```

## prune docker containers
`POST /api/server/<server_id>/containers/prune`

this method triggers the `container prune` action, which runs
`docker container prune -f` on the target server.

### response body
[Update](/api/types#update)
@@ -1,283 +0,0 @@
import Divider from "@site/src/components/Divider";

# types

these types are used across the monitor api, defined using `typescript`. they are referenced throughout the api docs.

```mdx-code-block
<Divider />
```

## build

```typescript
interface Build {
  _id?: {
    $oid: string;
  };
  name: string;
  description?: string;
  permissions?: {
    [user_id: string]: PermissionLevel;
  };
  skip_secret_interp?: boolean;
  server_id?: string;
  aws_config?: {
    region?: string;
    instance_type?: string;
    ami_name?: string;
    volume_gb?: number;
    subnet_id?: string;
    security_group_ids?: string[];
    key_pair_name?: string;
    assign_public_ip?: boolean;
  };
  version: {
    major: number;
    minor: number;
    patch: number;
  };
  repo?: string;
  branch?: string;
  github_account?: string;
  pre_build?: {
    path?: string;
    command?: string;
  };
  docker_build_args?: {
    build_path: string;
    dockerfile_path?: string;
    build_args?: Array<{
      variable: string;
      value: string;
    }>;
    extra_args?: string[];
    use_buildx?: boolean;
  };
  docker_account?: string;
  docker_organization?: string;
  last_built_at?: string;
  created_at?: string;
  updated_at?: string;
}
```

```mdx-code-block
<Divider />
```

## deployment

```typescript
interface Deployment {
  _id?: {
    $oid: string;
  };
  name: string;
  description?: string;
  server_id: string;
  permissions?: PermissionLevel;
  skip_secret_interp?: boolean;
  docker_run_args: {
    image: string;
    ports?: Array<{
      local: string;
      container: string;
    }>;
    volumes?: Array<{
      local: string;
      container: string;
    }>;
    environment?: Array<{
      variable: string;
      value: string;
    }>;
    network?: string;
    restart?: "no" | "on-failure" | "always" | "unless-stopped";
    post_image?: string;
    container_user?: string;
    extra_args?: string[];
    docker_account?: string;
  };
  build_id?: string;
  build_version?: {
    major: number;
    minor: number;
    patch: number;
  };
  repo?: string;
  branch?: string;
  github_account?: string;
  on_clone?: {
    path?: string;
    command?: string;
  };
  on_pull?: {
    path?: string;
    command?: string;
  };
  repo_mount?: {
    local: string;
    container: string;
  };
  created_at?: string;
  updated_at?: string;
}
```

```mdx-code-block
<Divider />
```
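since the deployment routes accept this type directly, updating a deployment programmatically is just a matter of modifying a `Deployment` value and sending it to the [update deployment](/api/deployment#update-deployment) route. a minimal sketch (placeholder ids and config, same auth assumptions as the other examples):

```typescript
// sketch: patch a deployment's config via the update deployment route.
const MONITOR_URL = "https://monitor.example.com"; // placeholder
const JWT = process.env.MONITOR_JWT!;

// a Deployment value, typically fetched first and then modified.
const deployment = {
  _id: { $oid: "64a0c3..." }, // placeholder id
  name: "example-mongo", // placeholder
  server_id: "64a0c3...", // placeholder server id
  docker_run_args: {
    image: "mongo:6.0.3",
    ports: [{ local: "27018", container: "27017" }],
    restart: "unless-stopped",
  },
};

const res = await fetch(`${MONITOR_URL}/api/deployment/update`, {
  method: "PATCH",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${JWT}`,
  },
  body: JSON.stringify(deployment),
});
const updated = await res.json(); // the updated Deployment
```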
## server

```typescript
interface Server {
  _id?: string;
  name: string;
  description?: string;
  address: string;
  permissions?: {
    [user_id: string]: PermissionLevel;
  };
  enabled: boolean;
  to_notify?: string[];
  auto_prune?: boolean;
  cpu_alert?: number;
  mem_alert?: number;
  disk_alert?: number;
  stats_interval?: Timelength;
  region?: string;
  instance_id?: string;
  created_at?: string;
  updated_at?: string;
}
```

```mdx-code-block
<Divider />
```

## update

```typescript
interface Update {
  _id?: string;
  target: {
    type: "System" | "Build" | "Deployment" | "Server" | "Procedure" | "Group";
    id?: string;
  };
  operation: Operation;
  logs: Array<{
    stage: string;
    command: string;
    stdout: string;
    stderr: string;
    success: boolean;
    start_ts: string;
    end_ts: string;
  }>;
  start_ts: string;
  end_ts?: string;
  status: "queued" | "in_progress" | "complete";
  success: boolean;
  operator: string;
  version?: {
    major: number;
    minor: number;
    patch: number;
  };
}
```

```mdx-code-block
<Divider />
```

## operation

```typescript
enum Operation {
  None = "none",
  CreateServer = "create_server",
  UpdateServer = "update_server",
  DeleteServer = "delete_server",
  PruneImagesServer = "prune_images_server",
  PruneContainersServer = "prune_containers_server",
  PruneNetworksServer = "prune_networks_server",
  RenameServer = "rename_server",
  CreateBuild = "create_build",
  UpdateBuild = "update_build",
  DeleteBuild = "delete_build",
  BuildBuild = "build_build",
  CreateDeployment = "create_deployment",
  UpdateDeployment = "update_deployment",
  DeleteDeployment = "delete_deployment",
  DeployContainer = "deploy_container",
  StopContainer = "stop_container",
  StartContainer = "start_container",
  RemoveContainer = "remove_container",
  PullDeployment = "pull_deployment",
  RecloneDeployment = "reclone_deployment",
  RenameDeployment = "rename_deployment",
  CreateProcedure = "create_procedure",
  UpdateProcedure = "update_procedure",
  DeleteProcedure = "delete_procedure",
  CreateGroup = "create_group",
  UpdateGroup = "update_group",
  DeleteGroup = "delete_group",
  ModifyUserEnabled = "modify_user_enabled",
  ModifyUserCreateServerPermissions = "modify_user_create_server_permissions",
  ModifyUserCreateBuildPermissions = "modify_user_create_build_permissions",
  ModifyUserPermissions = "modify_user_permissions",
  AutoBuild = "auto_build",
  AutoPull = "auto_pull",
}
```

```mdx-code-block
<Divider />
```

## permission level

```typescript
enum PermissionLevel {
  None = "none",
  Read = "read",
  Execute = "execute",
  Update = "update",
}
```

```mdx-code-block
<Divider />
```

## timelength

```typescript
enum Timelength {
  OneSecond = "1-sec",
  FiveSeconds = "5-sec",
  TenSeconds = "10-sec",
  FifteenSeconds = "15-sec",
  ThirtySeconds = "30-sec",
  OneMinute = "1-min",
  TwoMinutes = "2-min",
  FiveMinutes = "5-min",
  TenMinutes = "10-min",
  FifteenMinutes = "15-min",
  ThirtyMinutes = "30-min",
  OneHour = "1-hr",
  TwoHours = "2-hr",
  SixHours = "6-hr",
  EightHours = "8-hr",
  TwelveHours = "12-hr",
  OneDay = "1-day",
  ThreeDay = "3-day",
  OneWeek = "1-wk",
  TwoWeeks = "2-wk",
  ThirtyDays = "30-day",
}
```
@@ -1,9 +0,0 @@
# select a builder

A builder is a machine running monitor periphery and docker. Any server connected to monitor can be chosen as the builder for a build.

Building on a machine running production software is usually not a great idea, as this process can use a lot of system resources. It is better to start up a temporary cloud machine dedicated to the build, then shut it down when the build is finished. Right now monitor supports AWS EC2 for this task.

### AWS builder

You can choose to build on AWS on the "builder" tab on the build's page. From here you can select preconfigured AMIs to use as a base to build the image. These must be configured in the monitor core configuration along with other information like defaults to use, AWS credentials, etc. This is explained on the [core setup page](https://github.com/mbecker20/monitor/blob/main/docs/setup.md).
@@ -1,29 +0,0 @@
# configuration

monitor just needs a bit of information in order to build your image.

### repo configuration

To specify the github repo to build, just give it the name of the repo and the branch under *repo config*. The name is given like ```mbecker20/monitor```; it includes the username / organization that owns the repo.

Many repos are private; in this case a Github access token is required in the periphery.config.toml of the building server. These are specified in the config like ```username = "access_token"```. An account which has access to the repo and is available on the periphery server can be selected via the *github account* dropdown menu.

### docker build configuration

In order to run the docker build, monitor just needs to know the build directory and the path of the Dockerfile relative to the repo. You can configure these in the *build config* section.

If the build directory is the root of the repository, you pass the build path as ```.```. If the build directory is some folder of the repo, just pass the name of the folder, without the preceding "/". For example: ```build/directory```.

The dockerfile's path is given relative to the build directory. So if your build directory is ```build/directory``` and the dockerfile is at ```build/directory/Dockerfile.example```, you give the dockerfile path simply as ```Dockerfile.example```.

Just as with private repos, you will need to select a docker account to use with ```docker push```.

### adding build args

The Dockerfile may make use of [build args](https://docs.docker.com/engine/reference/builder/#arg). Build args can be passed using the gui by pressing the ```edit``` button. They are passed in the menu just as they would be in a .env file:

```
BUILD_ARG1=some_value
BUILD_ARG2=some_other_value
```

Note that these values are visible in the final image using ```docker history```, so they shouldn't be used to pass build-time secrets. Use [secret mounts](https://docs.docker.com/engine/reference/builder/#run---mounttypesecret) for this instead.
@@ -1,15 +0,0 @@
---
slug: /build-images
---

# building images

Monitor builds docker images by cloning the source repository from Github, running ```docker build```, and pushing the resulting image to docker hub. Any repo containing a ```Dockerfile``` is buildable using this method.

Build configuration involves passing file / directory paths; for more details about passing file paths, see the [file paths doc](/file-paths).

```mdx-code-block
import DocCardList from '@theme/DocCardList';

<DocCardList />
```
@@ -1,7 +0,0 @@
# pre build command

Sometimes a command needs to be run before running ```docker build```. You can configure this in the *pre build* section.

There are two fields to pass for *pre build*. The first is *path*, which changes the working directory. To run the command in the root of the repo, just pass ```.```. The second field is *command*, the shell command to be executed after the repo is cloned.

For example, say your repo had a folder in it called ```scripts``` with a shell script ```on-clone.sh```. You could give *path* as ```scripts``` and *command* as ```sh on-clone.sh```. Or you could make *path* just ```.```, in which case the command would be ```sh scripts/on-clone.sh```. Either way works fine.
@@ -1,3 +0,0 @@
# versioning

Monitor uses a major.minor.patch versioning scheme. Every build will auto increment the patch number, and push the image to docker hub with the version tag as well as the ```latest``` tag.
@@ -1,7 +0,0 @@
# adding the server to monitor

The easiest way to add the server is with the GUI. On the home page, click the ```+``` button to the right of the server search bar, then configure the name and address of the server. The address is the full http/s url to the periphery server, eg ```http://12.34.56.78:8000```.

Once it is added, you can use the GUI to modify some config, like the alerting thresholds for cpu, memory and disk usage. A server can also be temporarily disabled; this prevents alerting if it goes offline.

Since no state is stored on the periphery servers, you can easily redirect all deployments to be hosted on a different server. Just update the address to point to the new server.
@@ -1,16 +0,0 @@
---
slug: /connecting-servers
---

# connecting servers

Integrating a device into the monitor system has 2 steps:

1. Set up and start the periphery agent on the server
2. Add the server to monitor via the core API

```mdx-code-block
import DocCardList from '@theme/DocCardList';

<DocCardList />
```
@@ -1,43 +0,0 @@
# setup monitor periphery

The easiest way to do this is to follow the [monitor guide](https://github.com/mbecker20/monitor-guide). This is a repo containing directions and scripts enabling command line installation, locally or remotely via ssh.

### manual install steps

1. Download the periphery binary from the latest [release](https://github.com/mbecker20/monitor/releases).

2. Create and edit your config files, following the [config example](https://github.com/mbecker20/monitor/blob/main/config_example/periphery.config.example.toml). The monitor cli can be used to add the boilerplate: ```monitor periphery gen-config --path /path/to/config.toml```. The files can be anywhere, and can be passed to periphery via the ```--config-path``` argument.

3. Ensure that inbound connectivity is allowed on the port specified in periphery.config.toml (default 8000).

4. Install docker. Make sure whatever user periphery is run as has access to the docker group without sudo.

5. Start the periphery binary with your preferred process manager, like systemd. The config read from the file is printed on startup; ensure that it is as expected.

### example periphery start command

```
periphery \
  --config-path /path/to/periphery.config.base.toml \
  --config-path /other_path/to/override-periphery-config-directory \
  --config-keyword periphery \
  --config-keyword config \
  --merge-nested-config \
  --home_dir /home/username
```

### passing config files

Either file paths or directory paths can be passed to ```--config-path```.

When using directories, the file entries can be filtered by name with the ```--config-keyword``` argument, which can be passed multiple times to add more keywords. If passed, then only config files with file names that contain all keywords will be merged.

When passing multiple config files, a later --config-path given in the command will always override previous ones. Directory config files are merged in alphabetical order by name, so ```config_b.toml``` will override ```config_a.toml```.

There are two ways to merge config files. The default behavior is to completely replace any base fields with whatever fields are present in the override config. So if you pass ```allowed_ips = []``` in your override config, the final allowed_ips will be an empty list as well.

```--merge-nested-config``` will merge config fields recursively and extend config array fields.

For example, with ```--merge-nested-config``` you can specify an allowed ip in the base config and another in the override config, and they will both be present in the final config.
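To make the two merge behaviors concrete, here is a hypothetical pair of config files (the file names are placeholders, and only the ```allowed_ips``` field is shown):

```
# base.toml
allowed_ips = ["10.0.0.1"]

# override.toml
allowed_ips = ["10.0.0.2"]
```

With the default behavior the final ```allowed_ips``` is ```["10.0.0.2"]```; with ```--merge-nested-config``` it is ```["10.0.0.1", "10.0.0.2"]```.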
Similarly, you can specify a base docker / github account pair, and extend them with additional accounts in the override config.
@@ -1,40 +0,0 @@
# core setup

setting up monitor core is fairly simple. there are some requirements to run monitor core:

- a valid configuration file
- an instance of MongoDB to which monitor core can connect
- docker must be installed on the host

## 1. create the configuration file

create a configuration file on the system, for example at `~/.monitor/core.config.toml`, and copy the [example config](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml). fill in all the necessary information before continuing.

:::note
to enable OAuth2 login, you must create a client on the respective OAuth provider,
for example [google](https://developers.google.com/identity/protocols/oauth2)
or [github](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps).
monitor uses the `web application` login flow.
the redirect uri is `<base_url>/auth/google/callback` for google and `<base_url>/auth/github/callback` for github.
:::

## 2. start monitor core

monitor core is distributed via dockerhub under the public repo [mbecker2020/monitor_core](https://hub.docker.com/r/mbecker2020/monitor_core).

```sh
docker run -d --name monitor-core \
  -v $HOME/.monitor/core.config.toml:/config/config.toml \
  -p 9000:9000 \
  mbecker2020/monitor_core
```

## first login

monitor core should now be accessible on the specified port, so navigating to `http://<address>:<port>` will display the login page.

the first user to log in will be auto enabled and made admin. any additional users who create accounts will be disabled by default.

## https

monitor core itself only supports http, so a reverse proxy like [caddy](https://caddyserver.com/) should be used for https.
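a minimal sketch of such a reverse proxy, assuming caddy runs on the same host, monitor core listens on port 9000, and `monitor.example.com` is a placeholder domain you control (Caddyfile syntax; caddy provisions the TLS certificate automatically):

```
monitor.example.com {
  reverse_proxy localhost:9000
}
```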
@@ -1,83 +0,0 @@
# configuration

## choose the docker image

There are two options to configure the docker image to deploy.

### attaching a monitor build
If the software you want to deploy is built by monitor, you can attach the build directly to the deployment.

By default, monitor will deploy the latest available version of the build, or you can specify a particular version using the version dropdown.

Also by default, monitor will use the same docker account that is attached to the build in order to pull the image on the periphery server. If that account is not available on the server, you can specify another available account to use instead; this account just needs read access to the docker repository.

### using a custom image
You can also manually specify an image name, like ```mongo``` or ```mbecker2020/random_image:0.1.1```.

If the image repository is private, you can select an available docker account to use to pull the image.

## configuring the network

One feature of docker is that it allows for the creation of [virtual networks between containers](https://docs.docker.com/network/). Monitor allows you to specify a docker virtual network to connect the container to, or to use the host system networking to bypass the docker virtual network.

The default selection is ```host```, which bypasses the docker virtual network layer.

If you do select a network other than host, you can specify port bindings with the GUI. For example, if you are running mongo (which defaults to port 27017), you could use the mapping:

```
27018 : 27017
```

In this case, you would access mongo from outside of the container on port ```27018```.
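This mapping corresponds to docker's standard publish flag: when monitor parses the deployment into a ```docker run``` command, a binding like the one above becomes something like the following (illustrative, not the exact generated command):

```
docker run -p 27018:27017 ... mongo
```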
Note that this is not the only effect of using a network other than ```host```. For example, containers running on different networks cannot communicate, and ones on the same network cannot reach other containers on ```localhost``` even when they are running on the same system. This behavior can be a bit confusing if you are not familiar with it, and it can be bypassed entirely by just using the ```host``` network.

## configuring restart behavior

Docker, like systemd, has a couple of options for handling when a container exits. See [docker restart policies](https://docs.docker.com/config/containers/start-containers-automatically/). Monitor allows you to select the appropriate restart behavior from these options.

## configuring environment variables

Monitor enables you to easily manage environment variables passed to the container. In the GUI, click the 'edit' button on the 'environment' card; this will bring up the environment menu.

You pass environment variables just as you would with a ```.env``` file:

```
ENV_VAR_1=some_value
ENV_VAR_2=some_other_value
```

## configuring volumes

A docker container's filesystem is segregated from that of the host. However, it is still possible for a container to access system files and directories; this is accomplished by using [bind mounts](https://docs.docker.com/storage/bind-mounts/).

Say your container needs to read a config file located on the system at ```/home/ubuntu/config.toml```. You can specify the bind mount to be:

```
/home/ubuntu/config.toml : /config/config.toml
```

The first path is the one on the system, the second is the path in the container. Your application would then read the file at ```/config/config.toml``` in order to load its contents.

These can be configured easily with the GUI in the 'volumes' card. You can configure as many bind mounts as you need.

## extra args

Not all features of docker are mapped directly by monitor, only the most common. You can still specify any custom flags for monitor to include in the ```docker run``` command by utilizing 'extra args'. For example, you can enable log rotation using these two extra args:

```
--log-opt max-size=10M
```
```
--log-opt max-file=3
```

## post image

Sometimes you need to specify some flags to be passed directly to the application. What is put here is inserted into the docker run command after the image. For example, to pass the ```--quiet``` flag to MongoDB, the docker run command would be:

```
docker run -d --name mongo-db mongo:6.0.3 --quiet
```

In order to achieve this with monitor, just pass ```--quiet``` to 'post image'.
@@ -1,11 +0,0 @@
# deploy containers

Monitor can deploy any docker images that it can access with the configured docker accounts. It works by parsing the deployment configuration into a ```docker run``` command, which is then run on the target system. The configuration is stored in MongoDB, and records of all actions (update config, deploy, stop, etc.) are stored as well.

Deployment configuration involves passing file / directory paths; for more details about passing file paths, see the [file paths doc](/file-paths).

```mdx-code-block
import DocCardList from '@theme/DocCardList';

<DocCardList />
```
@@ -1,15 +0,0 @@
# container management

the lifecycle of a docker container is more like that of a virtual machine: containers can be created, started, stopped, and destroyed. monitor will display the state of the container and provides an API to manage all your containers' lifetimes.

this is achieved internally by running the appropriate docker command for the requested action (docker stop, docker start, etc).

### stopping a container

sometimes you want to stop a running application but preserve its logs and configuration, either to be restarted later or to view the logs at a later time. this is more like *pausing* the application with its current config, as no configuration (like environment variables, volume mounts, etc.) will be changed when the container is started again.

note that in order to restart an application with updated configuration, it must be *redeployed*. stopping and starting a container again will keep all configuration as it was when the container was initially created.

### container redeploy

redeploying is the action of destroying a container and recreating it. If you update deployment config, these changes will not take effect until the container is redeployed. Just note this will destroy the previous container's logs along with the container itself.
@@ -1,31 +0,0 @@
# file paths

when working with monitor, you might have to configure file or directory paths.

## relative paths

Where possible, it is better to use relative file paths. Using relative file paths removes the connection between the process being run and the particular server it runs on, making it easier to move things between servers.

Where you see relative paths:

- setting the build directory and path of the Dockerfile
- setting a pre build command path
- configuring a frontend mount (used for web apps)

For all of the above, the path can be given relative to the root of the configured repo.

The one exception is the Dockerfile path, which is given relative to the build directory (this is done by Docker itself, and this pattern matches usage of the Docker CLI).

There are 3 kinds of paths to pass:

1. to specify the root of the repo, use ```.``` as the path
2. to specify a folder in the repo, pass it with **no** preceding ```/```. For example, ```example_folder``` or ```folder1/folder2```
3. to specify an absolute path on the server's filesystem, use a preceding slash, eg. ```/home/ubuntu/example```. This should only be used if absolutely necessary, like when passing host paths when configuring docker volumes.

### implementation

relative file paths are joined with the path of the repo on the system using a Rust [PathBuf](https://doc.rust-lang.org/std/path/struct.PathBuf.html#method.push).

## Docker Volume Paths

These are passed directly to the Docker CLI using ```--volume /path/on/system:/path/in/container```. So for these, the same rules apply as when using Docker on the command line. Paths here should be given as absolute; don't use ```~``` or even ```$HOME```.
@@ -1,44 +0,0 @@
---
slug: /intro
---

# what is monitor?

If you have many servers running many applications, it can be a challenge to keep things organized and easily accessible. Without structure, things can become messy quickly, which means operational issues are more likely to arise and can take longer to resolve. Ultimately these issues hinder productivity and waste valuable time. Monitor is a web app that provides this structure for how applications are built, deployed, and managed across many servers.

## docker

Monitor is opinionated by design, and [docker](https://docs.docker.com/) is the tool of choice. Docker provides the ability to package applications and their runtime dependencies into a standalone bundle, called an *image*. This makes them easy to "ship" to any server and run without the hassle of setting up the runtime environment. Docker uses the image as a sort of template to create *containers*. Containers are kind of like virtual machines but with different performance characteristics, namely that contained processes still run natively on the system kernel. The file system is separate though, and like virtual machines, containers can be created, started, stopped, and destroyed.

## monitor

Monitor is a solution for handling the following:

1. Build application source into auto-versioned images.
2. Create, start, stop, and restart Docker containers, and view their status and logs.
3. Keep a record of all the actions that are performed and by whom.
4. View realtime and historical system resource usage.
5. Alert on server health issues, like high cpu, memory, or disk usage.

## architecture and components

Monitor is composed of a single core and any number of connected servers running the periphery application.

### monitor core
The core is a web server that hosts the core API and serves the frontend to be accessed in a web browser. All user interaction with the connected servers flows through the core. It is the stateful part of the system, with the application state stored on an instance of MongoDB.

### monitor periphery
The periphery is a stateless web server that exposes an API called by the core. The core calls this API to get system usage and container status / logs, clone git repos, and perform docker actions. It is only intended to be reached from the core, and has an address whitelist to limit the IPs allowed to call this API.

### monitor cli
This is a simple standalone cli that helps perform some actions required to set up monitor core and periphery, like generating config files.

## core API

Monitor exposes powerful functionality over the core's REST API, enabling infrastructure engineers to manage deployments programmatically in addition to with the GUI. There is a [rust crate](https://crates.io/crates/monitor_client) to simplify programmatic interaction with the API, but in general this can be accomplished using any programming language that can make REST requests.

## permissioning

Monitor is a system designed to be used by many users, whether they are developers, operations personnel, or administrators. The ability to affect an application's state is very powerful, so monitor has a granular permissioning system to provide this functionality only to the intended users. The permissioning system is explained in detail in the [permissioning](https://github.com/mbecker20/monitor/blob/main/docs/permissions.md) section.

User sign-on is possible using username / password, or with OAuth (Github and Google). Allowed login methods can be configured from the [core config](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml).
@@ -1,28 +0,0 @@
# permissioning resources

All monitor resources (servers, builds, deployments) have independent permission tables to allow users to have granular access to these resources. By default, users do not see any resources until they are given at least read permissions.

## permission levels

There are 4 levels of permissions a user can have on a resource:

1. **None**. This is the lowest permission level, and means the user will not have any access to this resource. They will not see it in the GUI, and it will not show up if the user queries the core API directly. All attempts to view or update the resource will be blocked.

2. **Read**. This is the first permission level that grants any access. It will enable the user to see the resource in the GUI, read the configuration, and see any logs. Any attempts to update configuration or trigger any action will be blocked.

3. **Execute**. This level will allow the user to execute actions on the resource, like send a build command or trigger a redeploy. The user will still be blocked from updating configuration on the resource.

4. **Update**. The user has full access to the resource; they can execute any actions, update the configuration, and delete the resource.

## Administration

Users can be given admin privileges by accessing the monitor MongoDB and setting ```admin: true``` on the intended user document. These users have unrestricted access to all monitor resources, like servers, builds, and deployments. Additionally, only these users can update other (non-admin) users' permissions on resources, an action not available to regular users even with **Update** level permissions.
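As a hedged sketch of that one-time bootstrap step (the database and collection names here are assumptions; check your MongoDB instance for the names your core config actually uses), the mongosh command would look something like:

```
use monitor
db.users.updateOne({ username: "your_username" }, { $set: { admin: true } })
```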
Monitor admins are responsible for managing user accounts as well. When a user logs into monitor for the first time, they will not immediately be granted access. An admin must first **enable** the user, which can be done from the 'manage users' page (found in the user dropdown menu in the topbar). Users can also be **disabled** by an admin at any time, which blocks all their access to the GUI and API.

Users also have some configurable global permissions:

- create server permission
- create build permission

Only users with these permissions (as well as admins) can add additional servers to monitor, and can create additional builds, respectively.
@@ -1,99 +0,0 @@
// @ts-check
// Note: type annotations allow type checking and IDEs autocompletion

const lightCodeTheme = require('prism-react-renderer/themes/github');
const darkCodeTheme = require('prism-react-renderer/themes/dracula');

/** @type {import('@docusaurus/types').Config} */
const config = {
  title: "monitor",
  tagline: "distributed build and deployment system",
  favicon: "img/favicon.ico",

  // Set the production url of your site here
  url: "https://mbecker20.github.io",
  // Set the /<baseUrl>/ pathname under which your site is served
  // For GitHub pages deployment, it is often '/<projectName>/'
  baseUrl: "/monitor/",
  // baseUrl: "/",

  // GitHub pages deployment config.
  // If you aren't using GitHub pages, you don't need these.
  organizationName: "mbecker20", // Usually your GitHub org/user name.
  projectName: "monitor", // Usually your repo name.
  trailingSlash: false,
  deploymentBranch: "gh-pages-docs",

  onBrokenLinks: "throw",
  onBrokenMarkdownLinks: "warn",

  // Even if you don't use internalization, you can use this field to set useful
  // metadata like html lang. For example, if your site is Chinese, you may want
  // to replace "en" with "zh-Hans".
  i18n: {
    defaultLocale: "en",
    locales: ["en"],
  },

  presets: [
    [
      "classic",
      /** @type {import('@docusaurus/preset-classic').Options} */
      ({
        docs: {
          routeBasePath: "/",
          sidebarPath: require.resolve("./sidebars.js"),
          // Please change this to your repo.
          // Remove this to remove the "edit this page" links.
          editUrl: "https://github.com/mbecker20/monitor/tree/main/docsite",
        },
        theme: {
          customCss: require.resolve("./src/css/custom.css"),
        },
      }),
    ],
  ],

  themeConfig:
    /** @type {import('@docusaurus/preset-classic').ThemeConfig} */
    ({
      // Replace with your project's social card
      image: "img/monitor-lizard.png",
      docs: {
        sidebar: {
          autoCollapseCategories: true,
        },
      },
      navbar: {
        title: "monitor",
        logo: {
          alt: "monitor lizard",
          src: "img/monitor-lizard.png",
        },
        items: [
          {
            type: "docSidebar",
            sidebarId: "docs",
            position: "left",
            label: "docs",
          },
          {
            href: "https://github.com/mbecker20/monitor",
            label: "GitHub",
            position: "right",
          },
        ],
      },
      footer: {
        style: "dark",
        copyright: `Built with Docusaurus`,
      },
      prism: {
        theme: lightCodeTheme,
        darkTheme: darkCodeTheme,
      },
    }),
};

module.exports = config;
@@ -1,38 +0,0 @@
{
  "name": "docsite",
  "version": "0.0.0",
  "private": true,
  "scripts": {
    "start": "docusaurus start",
    "deploy": "GIT_USER=mbecker20 docusaurus deploy"
  },
  "dependencies": {
    "@docusaurus/core": "2.4.0",
    "@docusaurus/preset-classic": "2.4.0",
    "@mdx-js/react": "^1.6.22",
    "clsx": "^1.2.1",
    "prism-react-renderer": "^1.3.5",
    "react": "^17.0.2",
    "react-dom": "^17.0.2"
  },
  "devDependencies": {
    "@docusaurus/module-type-aliases": "2.4.0",
    "@tsconfig/docusaurus": "^1.0.5",
    "typescript": "^4.7.4"
  },
  "browserslist": {
    "production": [
      ">0.5%",
      "not dead",
      "not op_mini all"
    ],
    "development": [
      "last 1 chrome version",
      "last 1 firefox version",
      "last 1 safari version"
    ]
  },
  "engines": {
    "node": ">=16.14"
  }
}
@@ -1,87 +0,0 @@
/**
 * Creating a sidebar enables you to:
 - create an ordered group of docs
 - render a sidebar for each doc of that group
 - provide next/previous navigation

 The sidebars can be generated from the filesystem, or explicitly defined here.

 Create as many sidebars as you want.
 */

// @ts-check

/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
const sidebars = {
  // By default, Docusaurus generates a sidebar from the docs folder structure
  // docsSidebar: [{type: 'autogenerated', dirName: '.'}],

  // But you can create a sidebar manually
  docs: [
    "intro",
    "core-setup",
    {
      type: "category",
      label: "connecting servers",
      link: {
        type: "doc",
        id: "connecting-servers/index",
      },
      items: [
        "connecting-servers/setup-periphery",
        "connecting-servers/add-server",
      ],
    },
    {
      type: "category",
      label: "build images",
      link: {
        type: "doc",
        id: "build-images/index",
      },
      items: [
        "build-images/configuration",
        "build-images/pre-build",
        "build-images/choosing-builder",
        "build-images/versioning",
      ],
    },
    {
      type: "category",
      label: "deploy containers",
      link: {
        type: "doc",
        id: "deploy-containers/index",
      },
      items: [
        "deploy-containers/configuration",
        "deploy-containers/lifetime-management",
        // "deploy-containers/choosing-builder",
        // "deploy-containers/versioning",
      ],
    },
    "permissioning",
    "file-paths",
    {
      type: "category",
      label: "API",
      link: {
        type: "doc",
        id: "api/index",
      },
      items: [
        "api/types",
        "api/authenticating-requests",
        "api/login",
        "api/api-secrets",
        "api/build",
        "api/deployment",
        "api/server",
        "api/permissions",
        "api/websocket",
      ],
    },
  ],
};

module.exports = sidebars;
@@ -1,15 +0,0 @@
import React from "react";

export default function Divider() {
  return (
    <div
      style={{
        opacity: 0.7,
        backgroundColor: "rgb(175, 175, 175)",
        height: "3px",
        width: "100%",
        margin: "75px 0px"
      }}
    />
  );
}
@@ -1,67 +0,0 @@
import React from 'react';
import clsx from 'clsx';
import styles from './styles.module.css';

type FeatureItem = {
  title: string;
  // Svg: React.ComponentType<React.ComponentProps<'svg'>>;
  description: JSX.Element;
};

const FeatureList: FeatureItem[] = [
  {
    title: 'automated builds 🛠️',
    // Svg: require('@site/static/img/undraw_docusaurus_mountain.svg').default,
    description: (
      <>
        build auto versioned docker images from github repos, trigger builds on git push
      </>
    ),
  },
  {
    title: 'deploy docker containers 🚀',
    // Svg: require('@site/static/img/undraw_docusaurus_tree.svg').default,
    description: (
      <>
        deploy your builds (or any docker image), see uptime and logs across all your servers
      </>
    ),
  },
  {
    title: 'powered by Rust 🦀',
    // Svg: require('@site/static/img/undraw_docusaurus_react.svg').default,
    description: (
      <>
        The core API and periphery client are written in Rust
      </>
    ),
  },
];

function Feature({ title, description }: FeatureItem) {
  return (
    <div className={clsx('col col--4')}>
      {/* <div className="text--center">
        <Svg className={styles.featureSvg} role="img" />
      </div> */}
      <div className="text--center padding-horiz--md">
        <h3>{title}</h3>
        <p>{description}</p>
      </div>
    </div>
  );
}

export default function HomepageFeatures(): JSX.Element {
  return (
    <section className={styles.features}>
      <div className="container">
        <div className="row">
          {FeatureList.map((props, idx) => (
            <Feature key={idx} {...props} />
          ))}
        </div>
      </div>
    </section>
  );
}
@@ -1,11 +0,0 @@
.features {
  display: flex;
  align-items: center;
  padding: 4rem 0;
  width: 100%;
}

.featureSvg {
  height: 200px;
  width: 200px;
}
@@ -1,11 +0,0 @@
import React from "react";

export default function MonitorLogo({ width = "4rem" }) {
  return (
    <img
      style={{ width, height: "auto", opacity: 0.7 }}
      src="img/monitor-lizard.png"
      alt="monitor-lizard"
    />
  );
}
@@ -1,13 +0,0 @@
import React from "react";

export default function SummaryImg() {
  return (
    <div style={{ display: "flex", justifyContent: "center" }}>
      <img
        style={{ marginBottom: "4rem", width: "1000px" }}
        src="img/monitor-summary.png"
        alt="monitor-summary"
      />
    </div>
  );
}
@@ -1,30 +0,0 @@
/**
 * Any CSS included here will be global. The classic template
 * bundles Infima by default. Infima is a CSS framework designed to
 * work well for content-centric websites.
 */

/* You can override the default Infima variables here. */
:root {
  --ifm-color-primary: #2e8555;
  --ifm-color-primary-dark: #29784c;
  --ifm-color-primary-darker: #277148;
  --ifm-color-primary-darkest: #205d3b;
  --ifm-color-primary-light: #33925d;
  --ifm-color-primary-lighter: #359962;
  --ifm-color-primary-lightest: #3cad6e;
  --ifm-code-font-size: 95%;
  --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
}

/* For readability concerns, you should choose a lighter palette in dark mode. */
[data-theme='dark'] {
  --ifm-color-primary: #25c2a0;
  --ifm-color-primary-dark: #21af90;
  --ifm-color-primary-darker: #1fa588;
  --ifm-color-primary-darkest: #1a8870;
  --ifm-color-primary-light: #29d5b0;
  --ifm-color-primary-lighter: #32d8b4;
  --ifm-color-primary-lightest: #4fddbf;
  --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
}
@@ -1,24 +0,0 @@
/**
 * CSS files with the .module.css suffix will be treated as CSS modules
 * and scoped locally.
 */

.heroBanner {
  padding: 4rem 0;
  text-align: center;
  position: relative;
  overflow: hidden;
}

@media screen and (max-width: 996px) {
  .heroBanner {
    padding: 2rem;
  }
}

.buttons {
  display: grid;
  gap: 1rem;
  grid-template-columns: 1fr 1fr;
  width: fit-content;
}
@@ -1,76 +0,0 @@
import React from 'react';
import clsx from 'clsx';
import Link from '@docusaurus/Link';
import useDocusaurusContext from '@docusaurus/useDocusaurusContext';
import Layout from '@theme/Layout';
import HomepageFeatures from '@site/src/components/HomepageFeatures';

import styles from './index.module.css';
import SummaryImg from '../components/SummaryImg';
import MonitorLogo from '../components/MonitorLogo';

function HomepageHeader() {
  const {siteConfig} = useDocusaurusContext();
  return (
    <header className={clsx("hero hero--primary", styles.heroBanner)}>
      <div className="container">
        <div style={{ display: "flex", gap: "1rem", justifyContent: "center" }}>
          <div style={{ position: "relative" }}>
            <MonitorLogo width="600px" />
            <h1
              className="hero__title"
              style={{
                margin: 0,
                position: "absolute",
                top: "40%",
                left: "50%",
                transform: "translate(-50%, -50%)",
              }}
            >
              monitor
            </h1>
          </div>
        </div>
        <p className="hero__subtitle">{siteConfig.tagline}</p>
        <div style={{ display: "flex", justifyContent: "center" }}>
          <div className={styles.buttons}>
            <Link className="button button--secondary button--lg" to="/intro">
              docs
            </Link>
            <Link
              className="button button--secondary button--lg"
              to="https://github.com/mbecker20/monitor"
            >
              github
            </Link>
            <Link
              className="button button--secondary button--lg"
              to="https://github.com/mbecker20/monitor#readme"
              style={{
                width: "100%",
                boxSizing: "border-box",
                gridColumn: "span 2",
              }}
            >
              screenshots
            </Link>
          </div>
        </div>
      </div>
    </header>
  );
}

export default function Home(): JSX.Element {
  const {siteConfig} = useDocusaurusContext();
  return (
    <Layout title="monitor docs" description={siteConfig.tagline}>
      {/* <SummaryImg /> */}
      <HomepageHeader />
      <main>
        <HomepageFeatures />
        {/* <SummaryImg /> */}
      </main>
    </Layout>
  );
}
Binary file not shown. (Before: 15 KiB)
File diff suppressed because one or more lines are too long. (Before: 6.3 KiB)
Binary file not shown. (Before: 69 KiB)
Binary file not shown. (Before: 117 KiB)
@@ -1,7 +0,0 @@
{
  // This file is not used in compilation. It is here just for a nice editor experience.
  "extends": "@tsconfig/docusaurus/tsconfig.json",
  "compilerOptions": {
    "baseUrl": "."
  }
}
7617
docsite/yarn.lock
File diff suppressed because it is too large
@@ -1,26 +0,0 @@
<!DOCTYPE html>
<html lang="en">

<head>
  <meta charset="utf-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1" />
  <meta name="theme-color" content="#000000" />

  <link rel="shortcut icon" type="image/ico" href="/assets/favicon.ico" />
  <link rel="apple-touch-icon" sizes="180x180" href="/assets/apple-touch-icon.png" />
  <link rel="icon" type="image/png" sizes="32x32" href="/assets/favicon-32x32.png">
  <link rel="icon" type="image/png" sizes="16x16" href="/assets/favicon-16x16.png">
  <link rel="manifest" href="/assets/manifest.json" />

  <title>monitor</title>
</head>

<body>
  <noscript>You need to enable JavaScript to run this app.</noscript>
  <div id="root" class="app-bounder"></div>

  <script src="/src/index.tsx" type="module"></script>
</body>

</html>
@@ -1,31 +0,0 @@
{
  "name": "vite-template-solid",
  "version": "0.0.0",
  "description": "",
  "scripts": {
    "start": "vite",
    "dev": "vite",
    "check": "tsc",
    "build": "vite build && node post-build.mjs",
    "serve": "vite preview"
  },
  "license": "GPL v3.0",
  "devDependencies": {
    "@types/sanitize-html": "^2.9.0",
    "sass": "^1.57.1",
    "typescript": "^4.9.4",
    "vite": "^4.0.3",
    "vite-plugin-solid": "^2.5.0"
  },
  "dependencies": {
    "@solidjs/router": "^0.6.0",
    "@tanstack/solid-query": "^4.26.0",
    "ansi-to-html": "^0.7.2",
    "axios": "^1.2.1",
    "js-file-download": "^0.4.12",
    "lightweight-charts": "^3.8.0",
    "reconnecting-websocket": "^4.4.0",
    "sanitize-html": "^2.10.0",
    "solid-js": "^1.6.6"
  }
}
@@ -1,11 +0,0 @@
import { readdirSync, renameSync, rmdirSync } from "fs";

const files = readdirSync("./build/assets");

for (const file of files) {
  renameSync("./build/assets/" + file, "./build/" + file);
}

rmdirSync("./build/assets");

console.log("\npost build complete\n")
Some files were not shown because too many files have changed in this diff.