Compare commits
201 commits
h.n.o-2.19 ... use-store-
| Author | SHA1 | Date |
|---|---|---|
| | 02a65eb8b9 | |
| | 18c0d76210 | |
| | 4a4a0f901c | |
| | 881462bb4e | |
| | af72b694d8 | |
| | c92342d12f | |
| | df07670a21 | |
| | 1381ee85d2 | |
| | b767c82b6e | |
| | 19a1c5ff04 | |
| | cde792e718 | |
| | 9b0d74ed84 | |
| | a94c1aeac4 | |
| | 9df591a8dd | |
| | 8be9f4c938 | |
| | 51944a5fa5 | |
| | 341b2f1309 | |
| | 4dc0f11379 | |
| | ea09952b7e | |
| | 81d21979ef | |
| | 0ed9a82912 | |
| | 80241fc8be | |
| | 4347833f45 | |
| | 8835cbd10f | |
| | 9ad8ac586c | |
| | 9a6928d93b | |
| | 810781a802 | |
| | af9b0663f2 | |
| | c6f98202cd | |
| | 1dbc7f5845 | |
| | c52845f560 | |
| | 85383b9522 | |
| | 2f92846e5a | |
| | d84ff32ce6 | |
| | 0c9726af59 | |
| | 5100b85537 | |
| | 141b5fd0b5 | |
| | 8d78648e65 | |
| | 8a8ac14877 | |
| | 250668a19f | |
| | efadb6a26c | |
| | 3b16941b14 | |
| | 9de9cb0ad8 | |
| | e75a4cbda8 | |
| | 6456c1d7d6 | |
| | 182a48c9fb | |
| | f974891c76 | |
| | 8515cb183e | |
| | 60dd7ec187 | |
| | 53b04ddf74 | |
| | 4e2c06ec2c | |
| | d3966d3e4c | |
| | f442d74f6e | |
| | a9a5b14331 | |
| | e6b9f0dec7 | |
| | 72899596df | |
| | bdeec354c3 | |
| | 1222ba03a6 | |
| | 8a54924d2a | |
| | 2a7b070da0 | |
| | c69e30122b | |
| | 750275d6e8 | |
| | ceb8b48cce | |
| | 95003f2eb5 | |
| | 012cbd43f5 | |
| | 9a75361781 | |
| | 029116422d | |
| | 108e409559 | |
| | 1a5bd9e103 | |
| | 647191cd4a | |
| | 73e51b94b1 | |
| | 1ef6b5e7b4 | |
| | 44248d3cf4 | |
| | cc1b6d394e | |
| | b472f55563 | |
| | c61bdd2c28 | |
| | 0231453cc5 | |
| | ae787e5799 | |
| | 2dad87ad89 | |
| | b6f44b5cd0 | |
| | c8b7a0fea9 | |
| | 2d79b0a4da | |
| | f730433789 | |
| | 916531dc9c | |
| | 0ead8dc65c | |
| | b1a0501520 | |
| | b94a7b6d5c | |
| | 9ee3c6aea2 | |
| | 02a514234b | |
| | 54a9729a0f | |
| | 250780aaf2 | |
| | 4bb2f08be1 | |
| | c23973785f | |
| | b2b2d6e26c | |
| | 99ca560d58 | |
| | 2c886f51d3 | |
| | 7de7122479 | |
| | 54002f0fcf | |
| | a6b14369ee | |
| | 578a3d2292 | |
| | ada51d70fc | |
| | bc19e7cd65 | |
| | d7986226f0 | |
| | 2feddd8511 | |
| | cd925e876f | |
| | b3e0d9a8b7 | |
| | 5728011da1 | |
| | 91bb72e323 | |
| | 09a1e64ed2 | |
| | bede2a141a | |
| | b75bf5c882 | |
| | d55bea2a1e | |
| | 346badc66f | |
| | a940450875 | |
| | 879ceb5cdc | |
| | 898ca2f600 | |
| | 559376e907 | |
| | 21044bc4d9 | |
| | af120e7195 | |
| | 71c4e2dc5b | |
| | e4552ddf91 | |
| | e4f2c84f8d | |
| | e10fc2bd13 | |
| | 998df1657e | |
| | f99cdaf5fe | |
| | 5e910fa2ce | |
| | 4b767aa9a2 | |
| | 3bf00e31c0 | |
| | 2926aa1d64 | |
| | e149da7b9b | |
| | 555ea44a7a | |
| | e81c36ac92 | |
| | 743795b2b0 | |
| | 50378aef22 | |
| | 92155f9a07 | |
| | 29ce5c603c | |
| | 410077a26e | |
| | 4bd687e3e6 | |
| | 1b8154e67f | |
| | b72528be50 | |
| | 8b48579593 | |
| | 39a4e4791e | |
| | ef7bf1e67b | |
| | ab1f64aa4d | |
| | 3f913a771d | |
| | 71986632ce | |
| | 1665aed5e3 | |
| | b676b08fac | |
| | d614163e9c | |
| | 99afff03b0 | |
| | 8f56209bd6 | |
| | 806c375c33 | |
| | 669617ab54 | |
| | c45c06509a | |
| | 9db5d0a88d | |
| | 973cb644d3 | |
| | e499509595 | |
| | ceff5c5cfe | |
| | 878c0f240e | |
| | c1bd50a80d | |
| | 14aabc1cc9 | |
| | 7b826ec5ad | |
| | 7a53b866f6 | |
| | 449eb2d873 | |
| | 2bdbf51d7d | |
| | 9e7ac58042 | |
| | 9a86da0e7b | |
| | 181b52787e | |
| | 20b0ad3ba2 | |
| | 7386caaecf | |
| | 84c46b6b68 | |
| | f1d9230f25 | |
| | 34c51fcea9 | |
| | 4ac31c89df | |
| | db7aa01b8d | |
| | 89cfe26533 | |
| | f3a760ad9c | |
| | 8c10331ee8 | |
| | 20f5a2120c | |
| | b56d2383c1 | |
| | 2bd67562b5 | |
| | 69a5b00e60 | |
| | 1d80b72ffb | |
| | 105fd18fee | |
| | f6f817926a | |
| | d0d3b0a298 | |
| | 3f932a6731 | |
| | aaa0e128c1 | |
| | 4515b5aa17 | |
| | fdd70363d7 | |
| | 20c8263e3c | |
| | 42cc55abf0 | |
| | 5c7e5b6465 | |
| | 89c504e2d9 | |
| | 2c3072aaee | |
| | a81c6a3a80 | |
| | 750978a192 | |
| | 71796e7c8b | |
| | 022160809b | |
| | 262a6027e1 | |
| | 3246bb6807 | |
.gitignore (vendored, 42 changes)

````diff
@@ -1,48 +1,8 @@
/.pls_cache
*.o
*~
Makefile
Makefile.in
.deps
.hydra-data
/config.guess
/config.log
/config.status
/config.sub
/configure
/depcomp
/libtool
/ltmain.sh
/autom4te.cache
/aclocal.m4
/missing
/install-sh
.test_info.*
/src/sql/hydra-postgresql.sql
/src/sql/hydra-sqlite.sql
/src/sql/tmp.sqlite
/src/hydra-eval-jobs/hydra-eval-jobs
/src/root/static/bootstrap
/src/root/static/js/flot
/tests
/doc/manual/images
/doc/manual/manual.html
/doc/manual/manual.pdf
/t/.bzr*
/t/.git*
/t/.hg*
/t/nix
/t/data
/t/jobs/config.nix
t/jobs/declarative/project.json
/inst
hydra-config.h
hydra-config.h.in
result
result-*
outputs
config
stamp-h1
src/hydra-evaluator/hydra-evaluator
src/hydra-queue-runner/hydra-queue-runner
src/root/static/fontawesome/
src/root/static/bootstrap*/
````
Makefile.am (deleted, 12 lines)

````diff
@@ -1,12 +0,0 @@
-SUBDIRS = src doc
-if CAN_DO_CHECK
-SUBDIRS += t
-endif
-
-BOOTCLEAN_SUBDIRS = $(SUBDIRS)
-DIST_SUBDIRS = $(SUBDIRS)
-EXTRA_DIST = nixos-modules/hydra.nix
-
-install-data-local: nixos-modules/hydra.nix
-	$(INSTALL) -d $(DESTDIR)$(datadir)/nix
-	$(INSTALL_DATA) nixos-modules/hydra.nix $(DESTDIR)$(datadir)/nix/hydra-module.nix
````
README.md (10 changes)

````diff
@@ -39,16 +39,16 @@ In order to evaluate and build anything you need to create _projects_ that conta
#### Creating A Project
Log in as administrator, click "_Admin_" and select "_Create project_". Fill the form as follows:

-- **Identifier**: `hello`
+- **Identifier**: `hello-project`
- **Display name**: `hello`
- **Description**: `hello project`

Click "_Create project_".

#### Creating A Jobset
-After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Fill the form with the following values:
+After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Change **Type** to Legacy for the example below. Fill the form with the following values:

-- **Identifier**: `hello`
+- **Identifier**: `hello-project`
- **Nix expression**: `examples/hello.nix` in `hydra`
- **Check interval**: 60
- **Scheduling shares**: 1
@@ -57,7 +57,7 @@ We have to add two inputs for this jobset. One for _nixpkgs_ and one for _hydra_

- **Input name**: `nixpkgs`
- **Type**: `Git checkout`
-- **Value**: `https://github.com/nixos/nixpkgs-channels nixos-20.03`
+- **Value**: `https://github.com/NixOS/nixpkgs nixos-24.05`

- **Input name**: `hydra`
- **Type**: `Git checkout`
@@ -140,7 +140,7 @@ You can also interface with Hydra through a JSON API. The API is defined in [hyd
## Additional Resources

- [Hydra User's Guide](https://nixos.org/hydra/manual/)
-- [Hydra on the NixOS Wiki](https://nixos.wiki/wiki/Hydra)
+- [Hydra on the NixOS Wiki](https://wiki.nixos.org/wiki/Hydra)
- [hydra-cli](https://github.com/nlewo/hydra-cli)
- [Peter Simons - Hydra: Setting up your own build farm (NixOS)](https://www.youtube.com/watch?v=RXV0Y5Bn-QQ)
````
configure.ac (deleted, 91 lines)

````diff
@@ -1,91 +0,0 @@
-AC_INIT([Hydra], [m4_esyscmd([echo -n $(cat ./version.txt)$VERSION_SUFFIX])])
-AC_CONFIG_AUX_DIR(config)
-AM_INIT_AUTOMAKE([foreign serial-tests])
-
-AC_LANG([C++])
-
-AC_PROG_CC
-AC_PROG_INSTALL
-AC_PROG_LN_S
-AC_PROG_LIBTOOL
-AC_PROG_CXX
-
-AC_PATH_PROG([XSLTPROC], [xsltproc])
-
-AC_ARG_WITH([docbook-xsl],
-  [AS_HELP_STRING([--with-docbook-xsl=PATH],
-    [path of the DocBook XSL stylesheets])],
-  [docbookxsl="$withval"],
-  [docbookxsl="/docbook-xsl-missing"])
-AC_SUBST([docbookxsl])
-
-
-AC_DEFUN([NEED_PROG],
-[
-AC_PATH_PROG($1, $2)
-if test -z "$$1"; then
-  AC_MSG_ERROR([$2 is required])
-fi
-])
-
-NEED_PROG(perl, perl)
-
-NEED_PROG([NIX_STORE_PROGRAM], [nix-store])
-
-AC_MSG_CHECKING([whether $NIX_STORE_PROGRAM is recent enough])
-if test -n "$NIX_STORE" -a -n "$TMPDIR"
-then
-  # This may be executed from within a build chroot, so pacify
-  # `nix-store' instead of letting it choke while trying to mkdir
-  # /nix/var.
-  NIX_STATE_DIR="$TMPDIR"
-  export NIX_STATE_DIR
-fi
-if NIX_REMOTE=daemon PAGER=cat "$NIX_STORE_PROGRAM" --timeout 123 -q; then
-  AC_MSG_RESULT([yes])
-else
-  AC_MSG_RESULT([no])
-  AC_MSG_ERROR([`$NIX_STORE_PROGRAM' doesn't support `--timeout'; please use a newer version.])
-fi
-
-PKG_CHECK_MODULES([NIX], [nix-main nix-expr nix-store])
-
-testPath="$(dirname $(type -p expr))"
-AC_SUBST(testPath)
-
-CXXFLAGS+=" -include nix/config.h"
-
-AC_CONFIG_FILES([
-  Makefile
-  doc/Makefile
-  doc/manual/Makefile
-  src/Makefile
-  src/hydra-evaluator/Makefile
-  src/hydra-eval-jobs/Makefile
-  src/hydra-queue-runner/Makefile
-  src/sql/Makefile
-  src/ttf/Makefile
-  src/lib/Makefile
-  src/root/Makefile
-  src/script/Makefile
-])
-
-# Tests might be filtered out
-AM_CONDITIONAL([CAN_DO_CHECK], [test -f "$srcdir/t/api-test.t"])
-AM_COND_IF(
-  [CAN_DO_CHECK],
-  [
-    jobsPath="$(realpath ./t/jobs)"
-    AC_SUBST(jobsPath)
-    AC_CONFIG_FILES([
-      t/Makefile
-      t/jobs/config.nix
-      t/jobs/declarative/project.json
-    ])
-  ])
-
-AC_CONFIG_COMMANDS([executable-scripts], [])
-
-AC_CONFIG_HEADER([hydra-config.h])
-
-AC_OUTPUT
````
default.nix

````diff
@@ -1,6 +1,6 @@
# The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and
# returns an attribute set of the shape `{ defaultNix, shellNix }`

-(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) {
+(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
  src = ./.;
}).defaultNix
````
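With the shim above in place, non-flake tooling can still build the project. A minimal sketch, assuming the flake's `packages` output shown later in this compare; the attribute path is an assumption, not taken from the repository's docs:

```console
# Build Hydra through the flake-compat shim; the attribute path
# packages.x86_64-linux.hydra is assumed from the flake outputs.
$ nix-build default.nix -A packages.x86_64-linux.hydra
```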
doc/Makefile.am (deleted, 4 lines)

````diff
@@ -1,4 +0,0 @@
-SUBDIRS = manual
-BOOTCLEAN_SUBDIRS = $(SUBDIRS)
-DIST_SUBDIRS = $(SUBDIRS)
-
````
doc/manual/Makefile.am (deleted, 6 lines)

````diff
@@ -1,6 +0,0 @@
-MD_FILES = src/*.md
-
-EXTRA_DIST = $(MD_FILES)
-
-install: $(MD_FILES)
-	mdbook build . -d $(docdir)
````
doc/manual/meson.build (new file, 36 lines)

````diff
@@ -0,0 +1,36 @@
+srcs = files(
+  'src/SUMMARY.md',
+  'src/about.md',
+  'src/api.md',
+  'src/configuration.md',
+  'src/hacking.md',
+  'src/installation.md',
+  'src/introduction.md',
+  'src/jobs.md',
+  'src/monitoring/README.md',
+  'src/notifications.md',
+  'src/plugins/README.md',
+  'src/plugins/RunCommand.md',
+  'src/plugins/declarative-projects.md',
+  'src/projects.md',
+  'src/webhooks.md',
+)
+
+manual = custom_target(
+  'manual',
+  command: [
+    mdbook,
+    'build',
+    '@SOURCE_ROOT@/doc/manual',
+    '-d', meson.current_build_dir() / 'html'
+  ],
+  depend_files: srcs,
+  output: ['html'],
+  build_by_default: true,
+)
+
+install_subdir(
+  manual.full_path(),
+  install_dir: get_option('datadir') / 'doc/hydra',
+  strip_directory: true,
+)
````
doc/manual/src/configuration.md

````diff
@@ -208,7 +208,8 @@ Example configuration:
<role_mapping>
  # Make all users in the hydra_admin group Hydra admins
  hydra_admin = admin
-  # Allow all users in the dev group to restart jobs and cancel builds
+  # Allow all users in the dev group to eval jobsets, restart jobs and cancel builds
+  dev = eval-jobset
  dev = restart-jobs
  dev = cancel-build
</role_mapping>
````
doc/manual/src/hacking.md

````diff
@@ -15,12 +15,18 @@ and dependencies can be found:
$ nix-shell
```

+or when flakes are enabled:
+
+```console
+$ nix develop
+```

To build Hydra, you should then do:

```console
[nix-shell]$ autoreconfPhase
[nix-shell]$ configurePhase
-[nix-shell]$ make
+[nix-shell]$ make -j$(nproc)
```

You start a local database, the webserver, and other components with
@@ -30,6 +36,8 @@ foreman:
$ foreman start
```

+The Hydra interface will be available on port 63333, with an admin user named "alice" with password "foobar"

You can run just the Hydra web server in your source tree as follows:

```console
````
|
||||
|
||||
## Compress build logs
|
||||
|
||||
Compresses build logs after a build with bzip2.
|
||||
Compresses build logs after a build with bzip2 or zstd.
|
||||
|
||||
### Configuration options
|
||||
|
||||
@@ -50,6 +50,14 @@ Compresses build logs after a build with bzip2.
|
||||
|
||||
Enable log compression
|
||||
|
||||
- `compress_build_logs_compression`
|
||||
|
||||
Which compression format to use. Valid values are bzip2 (default) and zstd.
|
||||
|
||||
- `compress_build_logs_silent`
|
||||
|
||||
Whether to compress logs silently.
|
||||
|
||||
### Example
|
||||
|
||||
```xml
|
||||
|
||||
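The example block itself did not survive extraction. A minimal `hydra.conf` fragment exercising the options documented above could look like this; the values are illustrative, not taken from the original example:

```
compress_build_logs = 1
compress_build_logs_compression = zstd
```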
doc/manual/src/webhooks.md

````diff
@@ -1,9 +1,12 @@
# Webhooks

-Hydra can be notified by github's webhook to trigger a new evaluation when a
+Hydra can be notified by github or gitea with webhooks to trigger a new evaluation when a
jobset has a github repo in its input.
-To set up a github webhook go to `https://github.com/<yourhandle>/<yourrepo>/settings` and in the `Webhooks` tab
-click on `Add webhook`.

+## GitHub
+
+To set up a webhook for a GitHub repository go to `https://github.com/<yourhandle>/<yourrepo>/settings`
+and in the `Webhooks` tab click on `Add webhook`.

- In `Payload URL` fill in `https://<your-hydra-domain>/api/push-github`.
- In `Content type` switch to `application/json`.
@@ -11,3 +14,14 @@ click on `Add webhook`.
- For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`.

Then add the hook with `Add webhook`.

+## Gitea
+
+To set up a webhook for a Gitea repository go to the settings of the repository in your Gitea instance
+and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop down.
+
+- In `Target URL` fill in `https://<your-hydra-domain>/api/push-gitea`.
+- Keep HTTP method `POST`, POST Content Type `application/json` and Trigger On `Push Events`.
+- Change the branch filter to match the git branch hydra builds.
+
+Then add the hook with `Add webhook`.
````
examples/hello.nix

````diff
@@ -1,5 +1,5 @@
#
-# jobset example file. This file canbe referenced as Nix expression
+# jobset example file. This file can be referenced as Nix expression
# in a jobset configuration along with inputs for nixpkgs and the
# repository containing this file.
#
````
flake.lock (generated, 108 changes)

````diff
@@ -1,114 +1,68 @@
{
  "nodes": {
-    "flake-compat": {
-      "flake": false,
-      "locked": {
-        "lastModified": 1673956053,
-        "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
-        "owner": "edolstra",
-        "repo": "flake-compat",
-        "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
-        "type": "github"
-      },
-      "original": {
-        "owner": "edolstra",
-        "repo": "flake-compat",
-        "type": "github"
-      }
-    },
-    "lowdown-src": {
-      "flake": false,
-      "locked": {
-        "lastModified": 1633514407,
-        "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
-        "owner": "kristapsdz",
-        "repo": "lowdown",
-        "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
-        "type": "github"
-      },
-      "original": {
-        "owner": "kristapsdz",
-        "repo": "lowdown",
-        "type": "github"
-      }
-    },
    "nix": {
      "inputs": {
-        "flake-compat": "flake-compat",
-        "lowdown-src": "lowdown-src",
+        "flake-compat": [],
+        "flake-parts": [],
+        "git-hooks-nix": [],
        "nixpkgs": [
          "nixpkgs"
        ],
-        "nixpkgs-regression": "nixpkgs-regression"
+        "nixpkgs-23-11": [],
+        "nixpkgs-regression": []
      },
      "locked": {
-        "lastModified": 1706208340,
-        "narHash": "sha256-wNyHUEIiKKVs6UXrUzhP7RSJQv0A8jckgcuylzftl8k=",
+        "lastModified": 1739899400,
+        "narHash": "sha256-q/RgA4bB7zWai4oPySq9mch7qH14IEeom2P64SXdqHs=",
        "owner": "NixOS",
        "repo": "nix",
-        "rev": "2c4bb93ba5a97e7078896ebc36385ce172960e4e",
+        "rev": "e310c19a1aeb1ce1ed4d41d5ab2d02db596e0918",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
-        "ref": "2.19-maintenance",
+        "ref": "2.26-maintenance",
        "repo": "nix",
        "type": "github"
      }
    },
+    "nix-eval-jobs": {
+      "flake": false,
+      "locked": {
+        "lastModified": 1739500569,
+        "narHash": "sha256-3wIReAqdTALv39gkWXLMZQvHyBOc3yPkWT2ZsItxedY=",
+        "owner": "nix-community",
+        "repo": "nix-eval-jobs",
+        "rev": "4b392b284877d203ae262e16af269f702df036bc",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-community",
+        "repo": "nix-eval-jobs",
+        "type": "github"
+      }
+    },
    "nixpkgs": {
      "locked": {
-        "lastModified": 1701615100,
-        "narHash": "sha256-7VI84NGBvlCTduw2aHLVB62NvCiZUlALLqBe5v684Aw=",
+        "lastModified": 1739461644,
+        "narHash": "sha256-1o1qR0KYozYGRrnqytSpAhVBYLNBHX+Lv6I39zGRzKM=",
        "owner": "NixOS",
        "repo": "nixpkgs",
-        "rev": "e9f06adb793d1cca5384907b3b8a4071d5d7cb19",
+        "rev": "97a719c9f0a07923c957cf51b20b329f9fb9d43f",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
-        "ref": "nixos-23.05",
+        "ref": "nixos-24.11-small",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
-    "nixpkgs-for-fileset": {
-      "locked": {
-        "lastModified": 1706098335,
-        "narHash": "sha256-r3dWjT8P9/Ah5m5ul4WqIWD8muj5F+/gbCdjiNVBKmU=",
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "a77ab169a83a4175169d78684ddd2e54486ac651",
-        "type": "github"
-      },
-      "original": {
-        "owner": "NixOS",
-        "ref": "nixos-23.11",
-        "repo": "nixpkgs",
-        "type": "github"
-      }
-    },
-    "nixpkgs-regression": {
-      "locked": {
-        "lastModified": 1643052045,
-        "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
-        "type": "github"
-      },
-      "original": {
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
-        "type": "github"
-      }
-    },
    "root": {
      "inputs": {
        "nix": "nix",
-        "nixpkgs": "nixpkgs",
-        "nixpkgs-for-fileset": "nixpkgs-for-fileset"
+        "nix-eval-jobs": "nix-eval-jobs",
+        "nixpkgs": "nixpkgs"
      }
    }
  },
````
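Lock-file churn like the above is normally produced by updating flake inputs rather than by hand. A sketch of the kind of commands involved; exact flag syntax varies across Nix versions and is not taken from this repository:

```console
# Re-pin a single input (here the "nix" input) in flake.lock
$ nix flake lock --update-input nix
# Or re-pin every input at once
$ nix flake update
```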
flake.nix (394 changes)

````diff
@@ -1,79 +1,44 @@
{
  description = "A Nix-based continuous build system";

-  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.05";
-  inputs.nix.url = "github:NixOS/nix/2.19-maintenance";
-  inputs.nix.inputs.nixpkgs.follows = "nixpkgs";
+  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11-small";

-  # TODO get rid of this once https://github.com/NixOS/nix/pull/9546 is
-  # mered and we upgrade or Nix, so the main `nixpkgs` input is at least
-  # 23.11 and has `lib.fileset`.
-  inputs.nixpkgs-for-fileset.url = "github:NixOS/nixpkgs/nixos-23.11";
+  inputs.nix = {
+    url = "github:NixOS/nix/2.26-maintenance";
+    inputs.nixpkgs.follows = "nixpkgs";

-  outputs = { self, nixpkgs, nix, nixpkgs-for-fileset }:
+    # hide nix dev tooling from our lock file
+    inputs.flake-parts.follows = "";
+    inputs.git-hooks-nix.follows = "";
+    inputs.nixpkgs-regression.follows = "";
+    inputs.nixpkgs-23-11.follows = "";
+    inputs.flake-compat.follows = "";
+  };
+
+  inputs.nix-eval-jobs = {
+    url = "github:nix-community/nix-eval-jobs";
+    # We want to control the deps precisely
+    flake = false;
+  };
+
+  outputs = { self, nixpkgs, nix, nix-eval-jobs, ... }:
    let
      systems = [ "x86_64-linux" "aarch64-linux" ];
      forEachSystem = nixpkgs.lib.genAttrs systems;

-      overlayList = [ self.overlays.default nix.overlays.default ];
-
-      pkgsBySystem = forEachSystem (system: import nixpkgs {
-        inherit system;
-        overlays = overlayList;
-      });
-
-      # NixOS configuration used for VM tests.
-      hydraServer =
-        { config, pkgs, ... }:
-        {
-          imports = [ self.nixosModules.hydraTest ];
-
-          virtualisation.memorySize = 1024;
-          virtualisation.writableStore = true;
-
-          environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
-
-          nix = {
-            # Without this nix tries to fetch packages from the default
-            # cache.nixos.org which is not reachable from this sandboxed NixOS test.
-            binaryCaches = [ ];
-          };
-        };
-
    in
    rec {

      # A Nixpkgs overlay that provides a 'hydra' package.
      overlays.default = final: prev: {

        # Add LDAP dependencies that aren't currently found within nixpkgs.
        perlPackages = prev.perlPackages // {

          PrometheusTiny = final.perlPackages.buildPerlPackage {
            pname = "Prometheus-Tiny";
            version = "0.007";
            src = final.fetchurl {
              url = "mirror://cpan/authors/id/R/RO/ROBN/Prometheus-Tiny-0.007.tar.gz";
              sha256 = "0ef8b226a2025cdde4df80129dd319aa29e884e653c17dc96f4823d985c028ec";
            };
            buildInputs = with final.perlPackages; [ HTTPMessage Plack TestException ];
            meta = {
              homepage = "https://github.com/robn/Prometheus-Tiny";
              description = "A tiny Prometheus client";
              license = with final.lib.licenses; [ artistic1 gpl1Plus ];
            };
          };

        };

+        nix-eval-jobs = final.callPackage nix-eval-jobs {};
        hydra = final.callPackage ./package.nix {
-          inherit (nixpkgs-for-fileset.lib) fileset;
+          inherit (nixpkgs.lib) fileset;
          rawSrc = self;
+          nix-perl-bindings = final.nixComponents.nix-perl-bindings;
        };
      };

      hydraJobs = {

        build = forEachSystem (system: packages.${system}.hydra);

        buildNoTests = forEachSystem (system:
@@ -82,297 +47,22 @@
          })
        );

-      manual = forEachSystem (system:
-        let pkgs = pkgsBySystem.${system}; in
-        pkgs.runCommand "hydra-manual-${pkgs.hydra.version}" { }
+      manual = forEachSystem (system: let
+        pkgs = nixpkgs.legacyPackages.${system};
+        hydra = self.packages.${pkgs.hostPlatform.system}.hydra;
+      in
+        pkgs.runCommand "hydra-manual-${hydra.version}" { }
          ''
            mkdir -p $out/share
-            cp -prvd ${pkgs.hydra}/share/doc $out/share/
+            cp -prvd ${hydra.doc}/share/doc $out/share/

            mkdir $out/nix-support
            echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
          '');

-      tests.install = forEachSystem (system:
-        with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
-        simpleTest {
-          name = "hydra-install";
-          nodes.machine = hydraServer;
-          testScript =
-            ''
-              machine.wait_for_job("hydra-init")
-              machine.wait_for_job("hydra-server")
-              machine.wait_for_job("hydra-evaluator")
-              machine.wait_for_job("hydra-queue-runner")
-              machine.wait_for_open_port(3000)
-              machine.succeed("curl --fail http://localhost:3000/")
-            '';
-        });
-
-      tests.notifications = forEachSystem (system:
-        let pkgs = pkgsBySystem.${system}; in
-        with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
-        simpleTest {
-          name = "hydra-notifications";
-          nodes.machine = { pkgs, ... }: {
-            imports = [ hydraServer ];
-            services.hydra-dev.extraConfig = ''
-              <influxdb>
-                url = http://127.0.0.1:8086
-                db = hydra
-              </influxdb>
-            '';
-            services.influxdb.enable = true;
-          };
-          testScript = ''
-            machine.wait_for_job("hydra-init")
-
-            # Create an admin account and some other state.
-            machine.succeed(
-                """
-                su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
-                mkdir /run/jobset
-                chmod 755 /run/jobset
-                cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
-                chmod 644 /run/jobset/default.nix
-                chown -R hydra /run/jobset
-                """
-            )
-
-            # Wait until InfluxDB can receive web requests
-            machine.wait_for_job("influxdb")
-            machine.wait_for_open_port(8086)
-
-            # Create an InfluxDB database where hydra will write to
-            machine.succeed(
-                "curl -XPOST 'http://127.0.0.1:8086/query' "
-                + "--data-urlencode 'q=CREATE DATABASE hydra'"
-            )
-
-            # Wait until hydra-server can receive HTTP requests
-            machine.wait_for_job("hydra-server")
-            machine.wait_for_open_port(3000)
-
-            # Setup the project and jobset
-            machine.succeed(
-                "su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
-            )
-
-            # Wait until hydra has build the job and
-            # the InfluxDBNotification plugin uploaded its notification to InfluxDB
-            machine.wait_until_succeeds(
-                "curl -s -H 'Accept: application/csv' "
-                + "-G 'http://127.0.0.1:8086/query?db=hydra' "
-                + "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
-            )
-          '';
-        });
-
-      tests.gitea = forEachSystem (system:
-        let pkgs = pkgsBySystem.${system}; in
-        with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
-        makeTest {
-          name = "hydra-gitea";
-          nodes.machine = { pkgs, ... }: {
-            imports = [ hydraServer ];
-            services.hydra-dev.extraConfig = ''
-              <gitea_authorization>
-              root=d7f16a3412e01a43a414535b16007c6931d3a9c7
-              </gitea_authorization>
-            '';
-            nix = {
-              distributedBuilds = true;
-              buildMachines = [{
-                hostName = "localhost";
-                systems = [ system ];
-              }];
-              binaryCaches = [ ];
-            };
-            services.gitea = {
-              enable = true;
-              database.type = "postgres";
-              disableRegistration = true;
-              httpPort = 3001;
-            };
-            services.openssh.enable = true;
-            environment.systemPackages = with pkgs; [ gitea git jq gawk ];
-            networking.firewall.allowedTCPPorts = [ 3000 ];
-          };
-          skipLint = true;
-          testScript =
-            let
-              scripts.mktoken = pkgs.writeText "token.sql" ''
-                INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7');
-              '';
-
-              scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
-                set -x
-                mkdir -p /tmp/repo $HOME/.ssh
-                cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
-                chmod 0400 $HOME/.ssh/privk
-                git -C /tmp/repo init
-                cp ${smallDrv} /tmp/repo/jobset.nix
-                git -C /tmp/repo add .
-                git config --global user.email test@localhost
-                git config --global user.name test
-                git -C /tmp/repo commit -m 'Initial import'
-                git -C /tmp/repo remote add origin gitea@machine:root/repo
-                GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
-                  git -C /tmp/repo push origin master
-                git -C /tmp/repo log >&2
-              '';
-
-              scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
-                set -x
-                su -l hydra -c "hydra-create-user root --email-address \
-                  'alice@example.org' --password foobar --role admin"
-
-                URL=http://localhost:3000
-                USERNAME="root"
-                PASSWORD="foobar"
-                PROJECT_NAME="trivial"
-                JOBSET_NAME="trivial"
-                mycurl() {
-                  curl --referer $URL -H "Accept: application/json" \
-                    -H "Content-Type: application/json" $@
-                }
-
-                cat >data.json <<EOF
-                { "username": "$USERNAME", "password": "$PASSWORD" }
-                EOF
-                mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt
-
-                cat >data.json <<EOF
-                {
-                  "displayname":"Trivial",
-                  "enabled":"1",
-                  "visible":"1"
-                }
-                EOF
-                mycurl --silent -X PUT $URL/project/$PROJECT_NAME \
-                  -d @data.json -b hydra-cookie.txt
-
-                cat >data.json <<EOF
-                {
-                  "description": "Trivial",
-                  "checkinterval": "60",
-                  "enabled": "1",
-                  "visible": "1",
-                  "keepnr": "1",
-                  "enableemail": true,
-                  "emailoverride": "hydra@localhost",
-                  "type": 0,
-                  "nixexprinput": "git",
-                  "nixexprpath": "jobset.nix",
-                  "inputs": {
-                    "git": {"value": "http://localhost:3001/root/repo.git", "type": "git"},
-                    "gitea_repo_name": {"value": "repo", "type": "string"},
-                    "gitea_repo_owner": {"value": "root", "type": "string"},
-                    "gitea_status_repo": {"value": "git", "type": "string"},
-                    "gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
-                  }
-                }
-                EOF
-
-                mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME \
-                  -d @data.json -b hydra-cookie.txt
-              '';
-
-              api_token = "d7f16a3412e01a43a414535b16007c6931d3a9c7";
-
-              snakeoilKeypair = {
-                privkey = pkgs.writeText "privkey.snakeoil" ''
-                  -----BEGIN EC PRIVATE KEY-----
-                  MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
-                  AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
-                  r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
-                  -----END EC PRIVATE KEY-----
-                '';
-
-                pubkey = pkgs.lib.concatStrings [
-                  "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
-                  "yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
-                  "9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= sakeoil"
-                ];
-              };
-
-              smallDrv = pkgs.writeText "jobset.nix" ''
-                { trivial = builtins.derivation {
-                    name = "trivial";
-                    system = "${system}";
-                    builder = "/bin/sh";
-                    allowSubstitutes = false;
-                    preferLocalBuild = true;
-                    args = ["-c" "echo success > $out; exit 0"];
-                  };
-                }
-              '';
-            in
-            ''
-              import json
-
-              machine.start()
-              machine.wait_for_unit("multi-user.target")
-              machine.wait_for_open_port(3000)
-              machine.wait_for_open_port(3001)
-
-              machine.succeed(
-                  "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
-                  + "--username root --password root --email test@localhost'"
-              )
-              machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")
-
-              machine.succeed(
-                  "curl --fail -X POST http://localhost:3001/api/v1/user/repos "
-                  + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
-                  + f"-H 'Authorization: token ${api_token}'"
-                  + ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
-              )
-
-              machine.succeed(
-                  "curl --fail -X POST http://localhost:3001/api/v1/user/keys "
-                  + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
-                  + f"-H 'Authorization: token ${api_token}'"
-                  + ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
-              )
-
-              machine.succeed(
-                  "${scripts.git-setup}"
-              )
-
-              machine.succeed(
-                  "${scripts.hydra-setup}"
-              )
-
-              machine.wait_until_succeeds(
-                  'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
-                  + '| jq .buildstatus | xargs test 0 -eq'
-              )
-
-              data = machine.succeed(
-                  'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
-                  + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
-                  + f"-H 'Authorization: token ${api_token}'"
-              )
-
-              response = json.loads(data)
-
-              assert len(response) == 2, "Expected exactly two status updates for latest commit!"
-              assert response[0]['status'] == "success", "Expected latest status to be success!"
-              assert response[1]['status'] == "pending", "Expected first status to be pending!"
-
-              machine.shutdown()
-            '';
-        });
-
-      tests.validate-openapi = forEachSystem (system:
-        let pkgs = pkgsBySystem.${system}; in
-        pkgs.runCommand "validate-openapi"
-          { buildInputs = [ pkgs.openapi-generator-cli ]; }
-          ''
-            openapi-generator-cli validate -i ${./hydra-api.yaml}
-            touch $out
-          '');
+      tests = import ./nixos-tests.nix {
+        inherit forEachSystem nixpkgs nixosModules;
+      };

      container = nixosConfigurations.container.config.system.build.toplevel;
    };
@@ -384,18 +74,34 @@
      });

      packages = forEachSystem (system: {
-        hydra = pkgsBySystem.${system}.hydra;
-        default = pkgsBySystem.${system}.hydra;
+        nix-eval-jobs = nixpkgs.legacyPackages.${system}.callPackage nix-eval-jobs {
+          nix = nix.packages.${system}.nix;
+        };
+        hydra = nixpkgs.legacyPackages.${system}.callPackage ./package.nix {
+          inherit (nixpkgs.lib) fileset;
+          inherit (self.packages.${system}) nix-eval-jobs;
+          rawSrc = self;
+          inherit (nix.packages.${system})
+            nix-util
+            nix-store
+            nix-main
+            nix-cmd
+            nix-cli
+            ;
+          nix-perl-bindings = nix.hydraJobs.perlBindings.${system};
+        };
+        default = self.packages.${system}.hydra;
      });

      nixosModules = import ./nixos-modules {
-        overlays = overlayList;
+        inherit self;
      };

      nixosConfigurations.container = nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";
        modules =
          [
            self.nixosModules.hydra
            self.nixosModules.hydraTest
            self.nixosModules.hydraProxy
            {
````
````diff
@@ -1,6 +1,6 @@
#!/bin/sh

-mdbook serve \
+exec mdbook serve \
    --port 63332 \
    --dest-dir ./.hydra-data/manual \
    ./doc/manual/
````
hydra-api.yaml

````diff
@@ -70,7 +70,7 @@ paths:
            $ref: '#/components/examples/projects-success'

  /api/push:
-    put:
+    post:
      summary: trigger jobsets
      parameters:
        - in: query
````
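Given the method change above, triggering a jobset through the JSON API is now a POST. A sketch, assuming a Hydra instance at `hydra.example.org` and the `hello-project:hello` jobset from the README; both names are placeholders:

```console
# Trigger an evaluation of one or more jobsets
# (comma-separated project:jobset pairs); PUT was the accepted
# method before this change.
$ curl --fail -X POST \
    -H 'Accept: application/json' \
    'https://hydra.example.org/api/push?jobsets=hello-project:hello'
```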
meson.build (new file, 40 lines)

````diff
@@ -0,0 +1,40 @@
+project('hydra', 'cpp',
+  version: files('version.txt'),
+  license: 'GPL-3.0',
+  default_options: [
+    'debug=true',
+    'optimization=2',
+    'cpp_std=c++20',
+  ],
+)
+
+nix_util_dep = dependency('nix-util', required: true)
+nix_store_dep = dependency('nix-store', required: true)
+nix_main_dep = dependency('nix-main', required: true)
+
+# Nix need extra flags not provided in its pkg-config files.
+nix_dep = declare_dependency(
+  dependencies: [
+    nix_util_dep,
+    nix_store_dep,
+    nix_main_dep,
+  ],
+  compile_args: [
+    '-include', 'nix/config-util.hh',
+    '-include', 'nix/config-store.hh',
+    '-include', 'nix/config-main.hh',
+  ],
+)
+
+pqxx_dep = dependency('libpqxx', required: true)
+
+prom_cpp_core_dep = dependency('prometheus-cpp-core', required: true)
+prom_cpp_pull_dep = dependency('prometheus-cpp-pull', required: true)
+
+mdbook = find_program('mdbook', native: true)
+perl = find_program('perl', native: true)
+
+subdir('doc/manual')
+subdir('nixos-modules')
+subdir('src')
+subdir('t')
````
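For orientation, the usual Meson workflow against a tree with this top-level `meson.build`; this is a sketch of standard Meson commands, not taken from this repository's docs:

```console
$ meson setup build                 # configure into ./build
$ ninja -C build                    # compile
$ meson test -C build --verbose     # run the t/ suite; matches mesonCheckFlags in package.nix
```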
nixos-modules/default.nix

````diff
@@ -1,14 +1,13 @@
-{ overlays }:
+{ self }:

-rec {
-  hydra = {
+{
+  hydra = { pkgs, lib,... }: {
    _file = ./default.nix;
    imports = [ ./hydra.nix ];
-    nixpkgs = { inherit overlays; };
+    services.hydra-dev.package = lib.mkDefault self.packages.${pkgs.hostPlatform.system}.hydra;
  };

  hydraTest = { pkgs, ... }: {
    imports = [ hydra ];

    services.hydra-dev.enable = true;
    services.hydra-dev.hydraURL = "http://hydra.example.org";
    services.hydra-dev.notificationSender = "admin@hydra.example.org";
@@ -16,7 +15,7 @@ rec {
    systemd.services.hydra-send-stats.enable = false;

    services.postgresql.enable = true;
-    services.postgresql.package = pkgs.postgresql_11;
+    services.postgresql.package = pkgs.postgresql_12;

    # The following is to work around the following error from hydra-server:
    #   [error] Caught exception in engine "Cannot determine local time zone"
````

nixos-modules/hydra.nix

````diff
@@ -68,8 +68,6 @@ in

    package = mkOption {
      type = types.path;
-      default = pkgs.hydra;
-      defaultText = literalExpression "pkgs.hydra";
      description = "The Hydra package.";
    };

@@ -233,7 +231,7 @@ in
      gc-keep-outputs = true;
      gc-keep-derivations = true;
    };

    services.hydra-dev.extraConfig =
      ''
        using_frontend_proxy = 1
@@ -340,6 +338,7 @@ in
    systemd.services.hydra-queue-runner =
      { wantedBy = [ "multi-user.target" ];
        requires = [ "hydra-init.service" ];
+        wants = [ "network-online.target" ];
        after = [ "hydra-init.service" "network.target" "network-online.target" ];
        path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
        restartTriggers = [ hydraConf ];
@@ -408,6 +407,7 @@ in
        requires = [ "hydra-init.service" ];
        after = [ "hydra-init.service" ];
        restartTriggers = [ hydraConf ];
+        path = [ pkgs.zstd ];
        environment = env // {
          PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
          HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify";
@@ -458,10 +458,17 @@ in
      # logs automatically after a step finishes, but this doesn't work
      # if the queue runner is stopped prematurely.
      systemd.services.hydra-compress-logs =
-        { path = [ pkgs.bzip2 ];
+        { path = [ pkgs.bzip2 pkgs.zstd ];
          script =
            ''
-              find ${baseDir}/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r bzip2 -v -f
+              set -eou pipefail
+              compression=$(sed -nr 's/compress_build_logs_compression = (.*)/\1/p' ${baseDir}/hydra.conf)
+              if [[ $compression == "" ]]; then
+                compression="bzip2"
+              elif [[ $compression == zstd ]]; then
+                compression="zstd --rm"
+              fi
+              find ${baseDir}/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r "$compression" --force --quiet
            '';
          startAt = "Sun 01:45";
        };
````
nixos-modules/meson.build (new file, 4 lines)

````diff
@@ -0,0 +1,4 @@
+install_data('hydra.nix',
+  install_dir: get_option('datadir') / 'nix',
+  rename: ['hydra-module.nix'],
+)
````
nixos-tests.nix (new file, 307 lines)

````diff
@@ -0,0 +1,307 @@
+{ forEachSystem, nixpkgs, nixosModules }:
+
+let
+  # NixOS configuration used for VM tests.
+  hydraServer =
+    { pkgs, ... }:
+    {
+      imports = [
+        nixosModules.hydra
+        nixosModules.hydraTest
+      ];
+
+      virtualisation.memorySize = 1024;
+      virtualisation.writableStore = true;
+
+      environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
+
+      nix = {
+        # Without this nix tries to fetch packages from the default
+        # cache.nixos.org which is not reachable from this sandboxed NixOS test.
+        settings.substituters = [ ];
+      };
+    };
+
+in
+
+{
+
+  install = forEachSystem (system:
+    with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
+    simpleTest {
+      name = "hydra-install";
+      nodes.machine = hydraServer;
+      testScript =
+        ''
+          machine.wait_for_job("hydra-init")
+          machine.wait_for_job("hydra-server")
+          machine.wait_for_job("hydra-evaluator")
+          machine.wait_for_job("hydra-queue-runner")
+          machine.wait_for_open_port(3000)
+          machine.succeed("curl --fail http://localhost:3000/")
+        '';
+    });
+
+  notifications = forEachSystem (system:
+    with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
+    simpleTest {
+      name = "hydra-notifications";
+      nodes.machine = {
+        imports = [ hydraServer ];
+        services.hydra-dev.extraConfig = ''
+          <influxdb>
+            url = http://127.0.0.1:8086
+            db = hydra
+          </influxdb>
+        '';
+        services.influxdb.enable = true;
+      };
+      testScript = ''
+        machine.wait_for_job("hydra-init")
+
+        # Create an admin account and some other state.
+        machine.succeed(
+            """
+            su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
+            mkdir /run/jobset
+            chmod 755 /run/jobset
+            cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
+            chmod 644 /run/jobset/default.nix
+            chown -R hydra /run/jobset
+            """
+        )
+
+        # Wait until InfluxDB can receive web requests
+        machine.wait_for_job("influxdb")
+        machine.wait_for_open_port(8086)
+
+        # Create an InfluxDB database where hydra will write to
+        machine.succeed(
+            "curl -XPOST 'http://127.0.0.1:8086/query' "
+            + "--data-urlencode 'q=CREATE DATABASE hydra'"
+        )
+
+        # Wait until hydra-server can receive HTTP requests
+        machine.wait_for_job("hydra-server")
+        machine.wait_for_open_port(3000)
+
+        # Setup the project and jobset
+        machine.succeed(
+            "su - hydra -c 'perl -I ${config.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
+        )
+
+        # Wait until hydra has build the job and
+        # the InfluxDBNotification plugin uploaded its notification to InfluxDB
+        machine.wait_until_succeeds(
+            "curl -s -H 'Accept: application/csv' "
+            + "-G 'http://127.0.0.1:8086/query?db=hydra' "
+            + "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
+        )
+      '';
+    });
+
+  gitea = forEachSystem (system:
+    let pkgs = nixpkgs.legacyPackages.${system}; in
+    with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
+    makeTest {
+      name = "hydra-gitea";
+      nodes.machine = { pkgs, ... }: {
+        imports = [ hydraServer ];
+        services.hydra-dev.extraConfig = ''
+          <gitea_authorization>
+          root=d7f16a3412e01a43a414535b16007c6931d3a9c7
+          </gitea_authorization>
+        '';
+        nixpkgs.config.permittedInsecurePackages = [ "gitea-1.19.4" ];
+        nix = {
+          settings.substituters = [ ];
+        };
+        services.gitea = {
+          enable = true;
+          database.type = "postgres";
+          settings = {
+            service.DISABLE_REGISTRATION = true;
+            server.HTTP_PORT = 3001;
+          };
+        };
+        services.openssh.enable = true;
+        environment.systemPackages = with pkgs; [ gitea git jq gawk ];
+        networking.firewall.allowedTCPPorts = [ 3000 ];
+      };
+      skipLint = true;
+      testScript =
+        let
+          scripts.mktoken = pkgs.writeText "token.sql" ''
+            INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight, scope) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7', 'all');
+          '';
+
+          scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
+            set -x
+            mkdir -p /tmp/repo $HOME/.ssh
+            cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
+            chmod 0400 $HOME/.ssh/privk
+            git -C /tmp/repo init
+            cp ${smallDrv} /tmp/repo/jobset.nix
+            git -C /tmp/repo add .
+            git config --global user.email test@localhost
+            git config --global user.name test
+            git -C /tmp/repo commit -m 'Initial import'
+            git -C /tmp/repo remote add origin gitea@machine:root/repo
+            GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
+              git -C /tmp/repo push origin master
+            git -C /tmp/repo log >&2
+          '';
+
+          scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
+            set -x
+            su -l hydra -c "hydra-create-user root --email-address \
+              'alice@example.org' --password foobar --role admin"
+
+            URL=http://localhost:3000
+            USERNAME="root"
+            PASSWORD="foobar"
+            PROJECT_NAME="trivial"
+            JOBSET_NAME="trivial"
+            mycurl() {
+              curl --referer $URL -H "Accept: application/json" \
+                -H "Content-Type: application/json" $@
+            }
+
+            cat >data.json <<EOF
+            { "username": "$USERNAME", "password": "$PASSWORD" }
+            EOF
+            mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt
+
+            cat >data.json <<EOF
+            {
+              "displayname":"Trivial",
+              "enabled":"1",
+              "visible":"1"
+            }
+            EOF
+            mycurl --silent -X PUT $URL/project/$PROJECT_NAME \
+              -d @data.json -b hydra-cookie.txt
+
+            cat >data.json <<EOF
+            {
+              "description": "Trivial",
+              "checkinterval": "60",
+              "enabled": "1",
+              "visible": "1",
+              "keepnr": "1",
+              "enableemail": true,
+              "emailoverride": "hydra@localhost",
+              "type": 0,
+              "nixexprinput": "git",
+              "nixexprpath": "jobset.nix",
+              "inputs": {
+                "git": {"value": "http://localhost:3001/root/repo.git", "type": "git"},
+                "gitea_repo_name": {"value": "repo", "type": "string"},
+                "gitea_repo_owner": {"value": "root", "type": "string"},
+                "gitea_status_repo": {"value": "git", "type": "string"},
+                "gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
+              }
+            }
+            EOF
+
+            mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME \
+              -d @data.json -b hydra-cookie.txt
+          '';
+
+          api_token = "d7f16a3412e01a43a414535b16007c6931d3a9c7";
+
+          snakeoilKeypair = {
+            privkey = pkgs.writeText "privkey.snakeoil" ''
+              -----BEGIN EC PRIVATE KEY-----
+              MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
+              AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
+              r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
+              -----END EC PRIVATE KEY-----
+            '';
+
+            pubkey = pkgs.lib.concatStrings [
+              "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
+              "yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
+              "9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= sakeoil"
+            ];
+          };
+
+          smallDrv = pkgs.writeText "jobset.nix" ''
+            { trivial = builtins.derivation {
+                name = "trivial";
+                system = "${system}";
+                builder = "/bin/sh";
+                allowSubstitutes = false;
+                preferLocalBuild = true;
+                args = ["-c" "echo success > $out; exit 0"];
+              };
+            }
+          '';
+        in
+        ''
+          import json
+
+          machine.start()
+          machine.wait_for_unit("multi-user.target")
+          machine.wait_for_open_port(3000)
+          machine.wait_for_open_port(3001)
+
+          machine.succeed(
+              "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
+              + "--username root --password root --email test@localhost'"
+          )
+          machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")
+
+          machine.succeed(
+              "curl --fail -X POST http://localhost:3001/api/v1/user/repos "
+              + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+              + f"-H 'Authorization: token ${api_token}'"
+              + ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
+          )
+
+          machine.succeed(
+              "curl --fail -X POST http://localhost:3001/api/v1/user/keys "
+              + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+              + f"-H 'Authorization: token ${api_token}'"
+              + ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
+          )
+
+          machine.succeed(
+              "${scripts.git-setup}"
+          )
+
+          machine.succeed(
+              "${scripts.hydra-setup}"
+          )
+
+          machine.wait_until_succeeds(
+              'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
+              + '| jq .buildstatus | xargs test 0 -eq'
+          )
+
+          data = machine.succeed(
+              'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
+              + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+              + f"-H 'Authorization: token ${api_token}'"
+          )
+
+          response = json.loads(data)
+
+          assert len(response) == 2, "Expected exactly three status updates for latest commit (queued, finished)!"
+          assert response[0]['status'] == "success", "Expected finished status to be success!"
+          assert response[1]['status'] == "pending", "Expected queued status to be pending!"
+
+          machine.shutdown()
+        '';
+    });
+
+  validate-openapi = forEachSystem (system:
+    let pkgs = nixpkgs.legacyPackages.${system}; in
+    pkgs.runCommand "validate-openapi"
+      { buildInputs = [ pkgs.openapi-generator-cli ]; }
+      ''
+        openapi-generator-cli validate -i ${./hydra-api.yaml}
+        touch $out
+      '');
+
+}
````
package.nix (66 changes)

````diff
@@ -8,11 +8,17 @@

, perlPackages

-, nix
+, nix-util
+, nix-store
+, nix-main
+, nix-cmd
+, nix-cli
+, nix-perl-bindings
, git

, makeWrapper
-, autoreconfHook
+, meson
+, ninja
, nukeReferences
, pkg-config
, mdbook
@@ -48,6 +54,7 @@
, xz
, gnutar
, gnused
+, nix-eval-jobs

, rpm
, dpkg
@@ -59,7 +66,7 @@ let
    name = "hydra-perl-deps";
    paths = lib.closePropagation
      ([
-        nix.perl-bindings
+        nix-perl-bindings
        git
      ] ++ (with perlPackages; [
        AuthenSASL
@@ -90,6 +97,7 @@ let
        DigestSHA1
        EmailMIME
        EmailSender
+        FileCopyRecursive
        FileLibMagic
        FileSlurper
        FileWhich
@@ -137,32 +145,28 @@ stdenv.mkDerivation (finalAttrs: {
  src = fileset.toSource {
    root = ./.;
    fileset = fileset.unions ([
-      ./version.txt
-      ./configure.ac
-      ./Makefile.am
-      ./src
-      ./doc
-      ./nixos-modules/hydra.nix
-      # These are always needed to appease Automake
-      ./t/Makefile.am
-      ./t/jobs/config.nix.in
-      ./t/jobs/declarative/project.json.in
-    ] ++ lib.optionals finalAttrs.doCheck [
+      ./meson.build
+      ./nixos-modules
+      ./src
+      ./t
+      ./version.txt
      ./.perlcriticrc
      ./.yath.rc
    ]);
  };

  outputs = [ "out" "doc" ];

  strictDeps = true;

  nativeBuildInputs = [
    makeWrapper
-    autoreconfHook
+    meson
+    ninja
    nukeReferences
    pkg-config
    mdbook
-    nix
+    nix-cli
    perlDeps
    perl
    unzip
@@ -172,7 +176,10 @@ stdenv.mkDerivation (finalAttrs: {
    libpqxx
    openssl
    libxslt
-    nix
+    nix-util
+    nix-store
+    nix-main
+    nix-cmd
    perlDeps
    perl
    boost
@@ -191,6 +198,7 @@ stdenv.mkDerivation (finalAttrs: {
    openldap
    postgresql_13
    pixz
+    nix-eval-jobs
  ];

  checkInputs = [
@@ -198,13 +206,14 @@ stdenv.mkDerivation (finalAttrs: {
    glibcLocales
    libressl.nc
    python3
+    nix-cli
  ];

  hydraPath = lib.makeBinPath (
    [
      subversion
      openssh
-      nix
+      nix-cli
      coreutils
      findutils
      pixz
@@ -219,15 +228,22 @@ stdenv.mkDerivation (finalAttrs: {
      darcs
      gnused
      breezy
+      nix-eval-jobs
    ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
  );

  OPENLDAP_ROOT = openldap;

+  mesonBuildType = "release";
+
+  postPatch = ''
+    patchShebangs .
+  '';
+
  shellHook = ''
    pushd $(git rev-parse --show-toplevel) >/dev/null

-    PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH
+    PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-queue-runner:$PATH
    PERL5LIB=$(pwd)/src/lib:$PERL5LIB
    export HYDRA_HOME="$(pwd)/src/"
    mkdir -p .hydra-data
@@ -237,14 +253,11 @@ stdenv.mkDerivation (finalAttrs: {
    popd >/dev/null
  '';

-  NIX_LDFLAGS = [ "-lpthread" ];
-
  enableParallelBuilding = true;

  doCheck = true;

+  mesonCheckFlags = [ "--verbose" ];
+
  preCheck = ''
    patchShebangs .
    export LOGNAME=''${LOGNAME:-foo}
    # set $HOME for bzr so it can create its trace file
    export HOME=$(mktemp -d)
@@ -261,12 +274,13 @@ stdenv.mkDerivation (finalAttrs: {
      --prefix PATH ':' $out/bin:$hydraPath \
      --set HYDRA_RELEASE ${version} \
      --set HYDRA_HOME $out/libexec/hydra \
-      --set NIX_RELEASE ${nix.name or "unknown"}
+      --set NIX_RELEASE ${nix-cli.name or "unknown"} \
+      --set NIX_EVAL_JOBS_RELEASE ${nix-eval-jobs.name or "unknown"}
    done
  '';

  dontStrip = true;

  meta.description = "Build of Hydra on ${stdenv.system}";
-  passthru = { inherit perlDeps nix; };
+  passthru = { inherit perlDeps; };
})
````
src/Makefile.am (deleted, 3 lines)

````diff
@@ -1,3 +0,0 @@
-SUBDIRS = hydra-evaluator hydra-eval-jobs hydra-queue-runner sql script lib root ttf
-BOOTCLEAN_SUBDIRS = $(SUBDIRS)
-DIST_SUBDIRS = $(SUBDIRS)
````
213
src/hydra-build-step/hydra-build-step.cc
Normal file
213
src/hydra-build-step/hydra-build-step.cc
Normal file
@@ -0,0 +1,213 @@
|
||||
/* This is a helper program that performs a build step, i.e. a single
|
||||
derivation. In addition to a derivation path, it takes three store
|
||||
URLs as arguments:
|
||||
|
||||
* --store: The store that will hold the resulting store paths
|
||||
(typically a binary cache).
|
||||
|
||||
* --eval-store: The store that holds the .drv files, as produced by
|
||||
hydra-evaluator.
|
||||
|
||||
* --build-store: The store that performs the build (often a
|
||||
SSHStore for remote builds).
|
||||
|
||||
The build log is written to the path indicated by --log-file.
|
||||
*/

#include "util.hh"
#include "shared.hh"
#include "common-eval-args.hh"
#include "store-api.hh"
#include "build-result.hh"
#include "derivations.hh"
#include "worker-protocol.hh"

#include <chrono>

using namespace nix;

// FIXME: cut&paste
static std::string_view getS(const std::vector<Logger::Field> & fields, size_t n)
{
    assert(n < fields.size());
    assert(fields[n].type == Logger::Field::tString);
    return fields[n].s;
}

void mainWrapped(std::list<std::string> args)
{
    verbosity = lvlError;

    struct MyArgs : MixEvalArgs, MixCommonArgs, RootArgs
    {
        Path drvPath;
        std::optional<std::string> buildStoreUrl;
        std::optional<Path> logPath;
        std::optional<uint64_t> maxOutputSize;

        MyArgs() : MixCommonArgs("hydra-build-step")
        {
            expectArg("drv-path", &drvPath);

            addFlag({
                .longName = "build-store",
                .description = "The Nix store to use for building the derivation.",
                //.category = category,
                .labels = {"store-url"},
                .handler = {&buildStoreUrl},
            });

            addFlag({
                .longName = "log-file",
                .description = "The path to the build log.",
                .labels = {"path"},
                .handler = {&logPath},
            });

            addFlag({
                .longName = "max-output-size",
                .description = "Maximum size of the outputs.",
                .labels = {"bytes"},
                .handler = {&maxOutputSize},
            });
        }
    };

    MyArgs myArgs;
    myArgs.parseCmdline(args);

    /* A logger that intercepts all build log lines and writes them to
       the log file. */
    struct MyLogger : public Logger
    {
        Logger & prev;
        AutoCloseFD logFile;

        MyLogger(Logger & prev, Path logPath) : prev(prev)
        {
            logFile = open(logPath.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
            if (!logFile)
                throw SysError("creating log file '%s'", logPath);
        }

        void log(Verbosity lvl, std::string_view s) override
        { prev.log(lvl, s); }

        void logEI(const ErrorInfo & ei) override
        { prev.logEI(ei); }

        void writeToStdout(std::string_view s) override
        { prev.writeToStdout(s); }

        void result(ActivityId act, ResultType type, const Fields & fields) override
        {
            if (type == resBuildLogLine)
                writeLine(logFile.get(), std::string(getS(fields, 0)));
            else
                prev.result(act, type, fields);
        }
    };

    auto destStore = openStore();
    auto evalStore = myArgs.evalStoreUrl ? openStore(*myArgs.evalStoreUrl) : destStore;
    auto buildStore = myArgs.buildStoreUrl ? openStore(*myArgs.buildStoreUrl) : destStore;

    auto drvPath = evalStore->parseStorePath(myArgs.drvPath);

    auto drv = evalStore->readDerivation(drvPath);
    BasicDerivation basicDrv(drv);

    uint64_t overhead = 0;

    /* Gather the inputs. */
    StorePathSet inputs;

    for (auto & p : drv.inputSrcs)
        inputs.insert(p);

    for (auto & [drvPath, node] : drv.inputDrvs.map) {
        auto drv2 = evalStore->readDerivation(drvPath);
        for (auto & name : node.value) {
            if (auto i = get(drv2.outputs, name)) {
                auto outPath = i->path(*evalStore, drv2.name, name);
                inputs.insert(*outPath);
                basicDrv.inputSrcs.insert(*outPath);
            }
        }
    }
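    /* At this point basicDrv lists the realized outputs of the input
       derivations as plain input sources, so the build store only needs
       those store paths and never the .drv closure. */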

    /* Ensure that the inputs exist in the destination store (so that
       the builder can substitute them from the destination
       store). This is a no-op for regular stores, but for the binary
       cache store, this will copy the inputs to the binary cache from
       the local store. */
    {
        auto now1 = std::chrono::steady_clock::now();

        debug("sending closure of '%s' to '%s'",
            evalStore->printStorePath(drvPath), destStore->getUri());

        if (evalStore != destStore)
            copyClosure(*evalStore, *destStore, drv.inputSrcs, NoRepair, NoCheckSigs);

        copyClosure(*destStore, *buildStore, inputs, NoRepair, NoCheckSigs, Substitute);

        auto now2 = std::chrono::steady_clock::now();

        overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
    }

    /* Perform the build. */
    if (myArgs.logPath)
        logger = new MyLogger(*logger, *myArgs.logPath);

    auto buildResult = buildStore->buildDerivation(drvPath, basicDrv);

    /* Copy the output paths from the build store to the destination
       store. */
    size_t totalNarSize = 0;

    if (buildResult.success()) {

        std::map<StorePath, ValidPathInfo> infos;
        StorePathSet outputs;
        for (auto & [output, realisation] : buildResult.builtOutputs) {
            auto info = buildStore->queryPathInfo(realisation.outPath);
            totalNarSize += info->narSize;
            infos.insert_or_assign(info->path, *info);
            outputs.insert(info->path);
        }

        if ((!myArgs.maxOutputSize || totalNarSize <= *myArgs.maxOutputSize)
            && buildStore != destStore)
        {
            debug("copying outputs of '%s' from '%s' (%d bytes)",
                buildStore->printStorePath(drvPath), buildStore->getUri(), totalNarSize);

            auto now1 = std::chrono::steady_clock::now();

            copyPaths(*buildStore, *destStore, outputs, NoRepair, NoCheckSigs);

            auto now2 = std::chrono::steady_clock::now();

            overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
        }
    }

    FdSink to { STDOUT_FILENO };
    WorkerProto::WriteConn wconn {
        .to = to,
        // Hardcode latest version because we are deploying hydra
        // itself atomically
        .version = PROTOCOL_VERSION,
    };
    WorkerProto::write(*evalStore, wconn, buildResult);
}
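// The parent process (State::buildRemote in the queue runner) reads the
// serialized BuildResult back from this program's stdout using a
// matching WorkerProto::ReadConn.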

int main(int argc, char * * argv)
{
    return handleExceptions(argv[0], [&]() {
        initNix();
        mainWrapped(argvToStrings(argc, argv));
    });
}
src/hydra-build-step/meson.build (new file, 14 lines)
@@ -0,0 +1,14 @@
srcs = files(
  'hydra-build-step.cc',
)

hydra_build_step = executable('hydra-build-step',
  'hydra-build-step.cc',
  srcs,
  dependencies: [
    libhydra_dep,
    nix_dep,
    dependency('nix-cmd', required: true)
  ],
  install: true,
)
@@ -1,5 +0,0 @@
|
||||
bin_PROGRAMS = hydra-eval-jobs
|
||||
|
||||
hydra_eval_jobs_SOURCES = hydra-eval-jobs.cc
|
||||
hydra_eval_jobs_LDADD = $(NIX_LIBS) -lnixcmd
|
||||
hydra_eval_jobs_CXXFLAGS = $(NIX_CFLAGS) -I ../libhydra
|
||||
@@ -1,579 +0,0 @@
|
||||
#include <iostream>
|
||||
#include <thread>
|
||||
#include <optional>
|
||||
#include <unordered_map>
|
||||
|
||||
#include "shared.hh"
|
||||
#include "store-api.hh"
|
||||
#include "eval.hh"
|
||||
#include "eval-inline.hh"
|
||||
#include "eval-settings.hh"
|
||||
#include "signals.hh"
|
||||
#include "terminal.hh"
|
||||
#include "util.hh"
|
||||
#include "get-drvs.hh"
|
||||
#include "globals.hh"
|
||||
#include "common-eval-args.hh"
|
||||
#include "flake/flakeref.hh"
|
||||
#include "flake/flake.hh"
|
||||
#include "attr-path.hh"
|
||||
#include "derivations.hh"
|
||||
#include "local-fs-store.hh"
|
||||
|
||||
#include "hydra-config.hh"
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/wait.h>
|
||||
#include <sys/resource.h>
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
void check_pid_status_nonblocking(pid_t check_pid)
|
||||
{
|
||||
// Only check 'initialized' and known PID's
|
||||
if (check_pid <= 0) { return; }
|
||||
|
||||
int wstatus = 0;
|
||||
pid_t pid = waitpid(check_pid, &wstatus, WNOHANG);
|
||||
// -1 = failure, WNOHANG: 0 = no change
|
||||
if (pid <= 0) { return; }
|
||||
|
||||
std::cerr << "child process (" << pid << ") ";
|
||||
|
||||
if (WIFEXITED(wstatus)) {
|
||||
std::cerr << "exited with status=" << WEXITSTATUS(wstatus) << std::endl;
|
||||
} else if (WIFSIGNALED(wstatus)) {
|
||||
std::cerr << "killed by signal=" << WTERMSIG(wstatus) << std::endl;
|
||||
} else if (WIFSTOPPED(wstatus)) {
|
||||
std::cerr << "stopped by signal=" << WSTOPSIG(wstatus) << std::endl;
|
||||
} else if (WIFCONTINUED(wstatus)) {
|
||||
std::cerr << "continued" << std::endl;
|
||||
}
|
||||
}

using namespace nix;

static Path gcRootsDir;
static size_t maxMemorySize;

struct MyArgs : MixEvalArgs, MixCommonArgs, RootArgs
{
    Path releaseExpr;
    bool flake = false;
    bool dryRun = false;

    MyArgs() : MixCommonArgs("hydra-eval-jobs")
    {
        addFlag({
            .longName = "gc-roots-dir",
            .description = "garbage collector roots directory",
            .labels = {"path"},
            .handler = {&gcRootsDir}
        });

        addFlag({
            .longName = "dry-run",
            .description = "don't create store derivations",
            .handler = {&dryRun, true}
        });

        addFlag({
            .longName = "flake",
            .description = "build a flake",
            .handler = {&flake, true}
        });

        expectArg("expr", &releaseExpr);
    }
};

static MyArgs myArgs;

static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std::string & name, const std::string & subAttribute)
{
    Strings res;
    std::function<void(Value & v)> rec;

    rec = [&](Value & v) {
        state.forceValue(v, noPos);
        if (v.type() == nString)
            res.emplace_back(v.string_view());
        else if (v.isList())
            for (unsigned int n = 0; n < v.listSize(); ++n)
                rec(*v.listElems()[n]);
        else if (v.type() == nAttrs) {
            auto a = v.attrs->find(state.symbols.create(subAttribute));
            if (a != v.attrs->end())
                res.push_back(std::string(state.forceString(*a->value, a->pos, "while evaluating meta attributes")));
        }
    };

    Value * v = drv.queryMeta(name);
    if (v) rec(*v);

    return concatStringsSep(", ", res);
}

static void worker(
    EvalState & state,
    Bindings & autoArgs,
    AutoCloseFD & to,
    AutoCloseFD & from)
{
    Value vTop;

    if (myArgs.flake) {
        using namespace flake;

        auto flakeRef = parseFlakeRef(myArgs.releaseExpr);

        auto vFlake = state.allocValue();

        auto lockedFlake = lockFlake(state, flakeRef,
            LockFlags {
                .updateLockFile = false,
                .useRegistries = false,
                .allowUnlocked = false,
            });

        callFlake(state, lockedFlake, *vFlake);

        auto vOutputs = vFlake->attrs->get(state.symbols.create("outputs"))->value;
        state.forceValue(*vOutputs, noPos);

        auto aHydraJobs = vOutputs->attrs->get(state.symbols.create("hydraJobs"));
        if (!aHydraJobs)
            aHydraJobs = vOutputs->attrs->get(state.symbols.create("checks"));
        if (!aHydraJobs)
            throw Error("flake '%s' does not provide any Hydra jobs or checks", flakeRef);

        vTop = *aHydraJobs->value;

    } else {
        state.evalFile(lookupFileArg(state, myArgs.releaseExpr), vTop);
    }

    auto vRoot = state.allocValue();
    state.autoCallFunction(autoArgs, vTop, *vRoot);

    while (true) {
        /* Wait for the master to send us a job name. */
        writeLine(to.get(), "next");

        auto s = readLine(from.get());
        if (s == "exit") break;
        if (!hasPrefix(s, "do ")) abort();
        std::string attrPath(s, 3);

        debug("worker process %d at '%s'", getpid(), attrPath);

        /* Evaluate it and send info back to the master. */
        nlohmann::json reply;

        try {
            auto vTmp = findAlongAttrPath(state, attrPath, autoArgs, *vRoot).first;

            auto v = state.allocValue();
            state.autoCallFunction(autoArgs, *vTmp, *v);

            if (auto drv = getDerivation(state, *v, false)) {

                // CA derivations do not have static output paths, so we
                // have to defensively not query output paths in case we
                // encounter one.
                DrvInfo::Outputs outputs = drv->queryOutputs(
                    !experimentalFeatureSettings.isEnabled(Xp::CaDerivations));

                if (drv->querySystem() == "unknown")
                    throw EvalError("derivation must have a 'system' attribute");

                auto drvPath = state.store->printStorePath(drv->requireDrvPath());

                nlohmann::json job;

                job["nixName"] = drv->queryName();
                job["system"] = drv->querySystem();
                job["drvPath"] = drvPath;
                job["description"] = drv->queryMetaString("description");
                job["license"] = queryMetaStrings(state, *drv, "license", "shortName");
                job["homepage"] = drv->queryMetaString("homepage");
                job["maintainers"] = queryMetaStrings(state, *drv, "maintainers", "email");
                job["schedulingPriority"] = drv->queryMetaInt("schedulingPriority", 100);
                job["timeout"] = drv->queryMetaInt("timeout", 36000);
                job["maxSilent"] = drv->queryMetaInt("maxSilent", 7200);
                job["isChannel"] = drv->queryMetaBool("isHydraChannel", false);

                /* If this is an aggregate, then get its constituents. */
                auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
                if (a && state.forceBool(*a->value, a->pos, "while evaluating the `_hydraAggregate` attribute")) {
                    auto a = v->attrs->get(state.symbols.create("constituents"));
                    if (!a)
                        throw EvalError("derivation must have a ‘constituents’ attribute");

                    NixStringContext context;
                    state.coerceToString(a->pos, *a->value, context, "while evaluating the `constituents` attribute", true, false);
                    for (auto & c : context)
                        std::visit(overloaded {
                            [&](const NixStringContextElem::Built & b) {
                                job["constituents"].push_back(b.drvPath->to_string(*state.store));
                            },
                            [&](const NixStringContextElem::Opaque & o) {
                            },
                            [&](const NixStringContextElem::DrvDeep & d) {
                            },
                        }, c.raw);

                    state.forceList(*a->value, a->pos, "while evaluating the `constituents` attribute");
                    for (unsigned int n = 0; n < a->value->listSize(); ++n) {
                        auto v = a->value->listElems()[n];
                        state.forceValue(*v, noPos);
                        if (v->type() == nString)
                            job["namedConstituents"].push_back(v->string_view());
                    }
                }

                /* Register the derivation as a GC root. !!! This
                   registers roots for jobs that we may have already
                   done. */
                auto localStore = state.store.dynamic_pointer_cast<LocalFSStore>();
                if (gcRootsDir != "" && localStore) {
                    Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath));
                    if (!pathExists(root))
                        localStore->addPermRoot(localStore->parseStorePath(drvPath), root);
                }

                nlohmann::json out;
                for (auto & [outputName, optOutputPath] : outputs) {
                    if (optOutputPath) {
                        out[outputName] = state.store->printStorePath(*optOutputPath);
                    } else {
                        // See the `queryOutputs` call above; we should
                        // not encounter missing output paths otherwise.
                        assert(experimentalFeatureSettings.isEnabled(Xp::CaDerivations));
                        out[outputName] = nullptr;
                    }
                }
                job["outputs"] = std::move(out);
                reply["job"] = std::move(job);
            }

            else if (v->type() == nAttrs) {
                auto attrs = nlohmann::json::array();
                StringSet ss;
                for (auto & i : v->attrs->lexicographicOrder(state.symbols)) {
                    std::string name(state.symbols[i->name]);
                    if (name.find(' ') != std::string::npos) {
                        printError("skipping job with illegal name '%s'", name);
                        continue;
                    }
                    attrs.push_back(name);
                }
                reply["attrs"] = std::move(attrs);
            }

            else if (v->type() == nNull)
                ;

            else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v));

        } catch (EvalError & e) {
            auto msg = e.msg();
            // Transmit the error we got from the previous evaluation
            // in the JSON output.
            reply["error"] = filterANSIEscapes(msg, true);
            // Don't forget to print it into the STDERR log; this is
            // what's shown in the Hydra UI.
            printError(msg);
        }

        writeLine(to.get(), reply.dump());

        /* If our RSS exceeds the maximum, exit. The master will
           start a new process. */
        struct rusage r;
        getrusage(RUSAGE_SELF, &r);
        if ((size_t) r.ru_maxrss > maxMemorySize * 1024) break;
    }

    writeLine(to.get(), "restart");
}
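
/* Summary of the wire protocol between the master and each worker, as
   implemented above (newline-delimited over a pipe pair):

     worker -> master: "next"            (ready for a job)
     master -> worker: "do <attrPath>"   (evaluate this attribute)
     worker -> master: <JSON reply>      (job, attrs, and/or error)
     master -> worker: "exit"            (no more work)
     worker -> master: "restart"         (RSS limit hit; respawn me)
*/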

int main(int argc, char * * argv)
{
    /* Prevent undeclared dependencies in the evaluation via
       $NIX_PATH. */
    unsetenv("NIX_PATH");

    return handleExceptions(argv[0], [&]() {

        auto config = std::make_unique<HydraConfig>();

        auto nrWorkers = config->getIntOption("evaluator_workers", 1);
        maxMemorySize = config->getIntOption("evaluator_max_memory_size", 4096);

        initNix();
        initGC();

        myArgs.parseCmdline(argvToStrings(argc, argv));

        auto pureEval = config->getBoolOption("evaluator_pure_eval", myArgs.flake);

        /* FIXME: The build hook in conjunction with import-from-derivation is causing "unexpected EOF" during eval */
        settings.builders = "";

        /* Prevent access to paths outside of the Nix search path and
           to the environment. */
        evalSettings.restrictEval = true;

        /* When building a flake, use pure evaluation (no access to
           'getEnv', 'currentSystem', etc.). */
        evalSettings.pureEval = pureEval;

        if (myArgs.dryRun) settings.readOnlyMode = true;

        if (myArgs.releaseExpr == "") throw UsageError("no expression specified");

        if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified");

        struct State
        {
            std::set<std::string> todo{""};
            std::set<std::string> active;
            nlohmann::json jobs;
            std::exception_ptr exc;
        };

        std::condition_variable wakeup;

        Sync<State> state_;

        /* Start a handler thread per worker process. */
        auto handler = [&]()
        {
            pid_t pid = -1;
            try {
                AutoCloseFD from, to;

                while (true) {

                    /* Start a new worker process if necessary. */
                    if (pid == -1) {
                        Pipe toPipe, fromPipe;
                        toPipe.create();
                        fromPipe.create();
                        pid = startProcess(
                            [&,
                             to{std::make_shared<AutoCloseFD>(std::move(fromPipe.writeSide))},
                             from{std::make_shared<AutoCloseFD>(std::move(toPipe.readSide))}
                            ]()
                            {
                                try {
                                    EvalState state(myArgs.searchPath, openStore());
                                    Bindings & autoArgs = *myArgs.getAutoArgs(state);
                                    worker(state, autoArgs, *to, *from);
                                } catch (Error & e) {
                                    nlohmann::json err;
                                    auto msg = e.msg();
                                    err["error"] = filterANSIEscapes(msg, true);
                                    // Don't forget to print it into the STDERR log; this is
                                    // what's shown in the Hydra UI.
                                    printError(msg);
                                    writeLine(to->get(), err.dump());
                                    writeLine(to->get(), "restart");
                                }
                            },
                            ProcessOptions { .allowVfork = false });
                        from = std::move(fromPipe.readSide);
                        to = std::move(toPipe.writeSide);
                        debug("created worker process %d", pid);
                    }

                    /* Check whether the existing worker process is still there. */
                    auto s = readLine(from.get());
                    if (s == "restart") {
                        pid = -1;
                        continue;
                    } else if (s != "next") {
                        auto json = nlohmann::json::parse(s);
                        throw Error("worker error: %s", (std::string) json["error"]);
                    }

                    /* Wait for a job name to become available. */
                    std::string attrPath;

                    while (true) {
                        checkInterrupt();
                        auto state(state_.lock());
                        if ((state->todo.empty() && state->active.empty()) || state->exc) {
                            writeLine(to.get(), "exit");
                            return;
                        }
                        if (!state->todo.empty()) {
                            attrPath = *state->todo.begin();
                            state->todo.erase(state->todo.begin());
                            state->active.insert(attrPath);
                            break;
                        } else
                            state.wait(wakeup);
                    }

                    /* Tell the worker to evaluate it. */
                    writeLine(to.get(), "do " + attrPath);

                    /* Wait for the response. */
                    auto response = nlohmann::json::parse(readLine(from.get()));

                    /* Handle the response. */
                    StringSet newAttrs;

                    if (response.find("job") != response.end()) {
                        auto state(state_.lock());
                        state->jobs[attrPath] = response["job"];
                    }

                    if (response.find("attrs") != response.end()) {
                        for (auto & i : response["attrs"]) {
                            std::string path = i;
                            if (path.find(".") != std::string::npos) {
                                path = "\"" + path + "\"";
                            }
                            auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) path;
                            newAttrs.insert(s);
                        }
                    }

                    if (response.find("error") != response.end()) {
                        auto state(state_.lock());
                        state->jobs[attrPath]["error"] = response["error"];
                    }

                    /* Add newly discovered job names to the queue. */
                    {
                        auto state(state_.lock());
                        state->active.erase(attrPath);
                        for (auto & s : newAttrs)
                            state->todo.insert(s);
                        wakeup.notify_all();
                    }
                }
            } catch (...) {
                check_pid_status_nonblocking(pid);
                auto state(state_.lock());
                state->exc = std::current_exception();
                wakeup.notify_all();
            }
        };

        std::vector<std::thread> threads;
        for (size_t i = 0; i < nrWorkers; i++)
            threads.emplace_back(std::thread(handler));

        for (auto & thread : threads)
            thread.join();

        auto state(state_.lock());

        if (state->exc)
            std::rethrow_exception(state->exc);

        /* For aggregate jobs that have named constituents
           (i.e. constituents that are a job name rather than a
           derivation), look up the referenced job and add it to the
           dependencies of the aggregate derivation. */
        auto store = openStore();

        for (auto i = state->jobs.begin(); i != state->jobs.end(); ++i) {
            auto jobName = i.key();
            auto & job = i.value();

            auto named = job.find("namedConstituents");
            if (named == job.end()) continue;

            std::unordered_map<std::string, std::string> brokenJobs;
            auto getNonBrokenJobOrRecordError = [&brokenJobs, &jobName, &state](
                const std::string & childJobName) -> std::optional<nlohmann::json> {
                auto childJob = state->jobs.find(childJobName);
                if (childJob == state->jobs.end()) {
                    printError("aggregate job '%s' references non-existent job '%s'", jobName, childJobName);
                    brokenJobs[childJobName] = "does not exist";
                    return std::nullopt;
                }
                if (childJob->find("error") != childJob->end()) {
                    std::string error = (*childJob)["error"];
                    printError("aggregate job '%s' references broken job '%s': %s", jobName, childJobName, error);
                    brokenJobs[childJobName] = error;
                    return std::nullopt;
                }
                return *childJob;
            };

            if (myArgs.dryRun) {
                for (std::string jobName2 : *named) {
                    auto job2 = getNonBrokenJobOrRecordError(jobName2);
                    if (!job2) {
                        continue;
                    }
                    std::string drvPath2 = (*job2)["drvPath"];
                    job["constituents"].push_back(drvPath2);
                }
            } else {
                auto drvPath = store->parseStorePath((std::string) job["drvPath"]);
                auto drv = store->readDerivation(drvPath);

                for (std::string jobName2 : *named) {
                    auto job2 = getNonBrokenJobOrRecordError(jobName2);
                    if (!job2) {
                        continue;
                    }
                    auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]);
                    auto drv2 = store->readDerivation(drvPath2);
                    job["constituents"].push_back(store->printStorePath(drvPath2));
                    drv.inputDrvs.map[drvPath2].value = {drv2.outputs.begin()->first};
                }

                if (brokenJobs.empty()) {
                    std::string drvName(drvPath.name());
                    assert(hasSuffix(drvName, drvExtension));
                    drvName.resize(drvName.size() - drvExtension.size());

                    auto hashModulo = hashDerivationModulo(*store, drv, true);
                    if (hashModulo.kind != DrvHash::Kind::Regular) continue;
                    auto h = hashModulo.hashes.find("out");
                    if (h == hashModulo.hashes.end()) continue;
                    auto outPath = store->makeOutputPath("out", h->second, drvName);
                    drv.env["out"] = store->printStorePath(outPath);
                    drv.outputs.insert_or_assign("out", DerivationOutput::InputAddressed { .path = outPath });
                    auto newDrvPath = store->printStorePath(writeDerivation(*store, drv));

                    debug("rewrote aggregate derivation %s -> %s", store->printStorePath(drvPath), newDrvPath);

                    job["drvPath"] = newDrvPath;
                    job["outputs"]["out"] = store->printStorePath(outPath);
                }
            }

            job.erase("namedConstituents");

            /* Register the derivation as a GC root. !!! This
               registers roots for jobs that we may have already
               done. */
            auto localStore = store.dynamic_pointer_cast<LocalFSStore>();
            if (gcRootsDir != "" && localStore) {
                auto drvPath = job["drvPath"].get<std::string>();
                Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath));
                if (!pathExists(root))
                    localStore->addPermRoot(localStore->parseStorePath(drvPath), root);
            }

            if (!brokenJobs.empty()) {
                std::stringstream ss;
                for (const auto & [jobName, error] : brokenJobs) {
                    ss << jobName << ": " << error << "\n";
                }
                job["error"] = ss.str();
            }
        }

        std::cout << state->jobs.dump(2) << "\n";
    });
}
@@ -1,5 +0,0 @@
|
||||
bin_PROGRAMS = hydra-evaluator
|
||||
|
||||
hydra_evaluator_SOURCES = hydra-evaluator.cc
|
||||
hydra_evaluator_LDADD = $(NIX_LIBS) -lpqxx
|
||||
hydra_evaluator_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
|
||||

@@ -38,7 +38,7 @@ class JobsetId {
     friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);

     std::string display() const {
-        return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id);
+        return boost::str(boost::format("%1%:%2% (jobset#%3%)") % project % jobset % id);
     }
 };
 bool operator==(const JobsetId & lhs, const JobsetId & rhs)
src/hydra-evaluator/meson.build (new file, 9 lines)
@@ -0,0 +1,9 @@
hydra_evaluator = executable('hydra-evaluator',
  'hydra-evaluator.cc',
  dependencies: [
    libhydra_dep,
    nix_dep,
    pqxx_dep,
  ],
  install: true,
)
@@ -1,8 +0,0 @@
|
||||
bin_PROGRAMS = hydra-queue-runner
|
||||
|
||||
hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \
|
||||
builder.cc build-result.cc build-remote.cc \
|
||||
hydra-build-result.hh counter.hh state.hh db.hh \
|
||||
nar-extractor.cc nar-extractor.hh
|
||||
hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx -lprometheus-cpp-pull -lprometheus-cpp-core
|
||||
hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations
|
||||

@@ -1,433 +1,27 @@
#include <algorithm>
#include <cmath>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <math.h>

#include "build-result.hh"
#include "path.hh"
#include "serve-protocol.hh"
#include "state.hh"
#include "current-process.hh"
#include "processes.hh"
#include "util.hh"
#include "serve-protocol.hh"
#include "serve-protocol-impl.hh"
#include "ssh.hh"
#include "finally.hh"
#include "url.hh"
#include "worker-protocol.hh"

using namespace nix;


static void append(Strings & dst, const Strings & src)
{
    dst.insert(dst.end(), src.begin(), src.end());
}

namespace nix::build_remote {

static Strings extraStoreArgs(std::string & machine)
{
    Strings result;
    try {
        auto parsed = parseURL(machine);
        if (parsed.scheme != "ssh") {
            throw SysError("Currently, only (legacy-)ssh stores are supported!");
        }
        machine = parsed.authority.value_or("");
        auto remoteStore = parsed.query.find("remote-store");
        if (remoteStore != parsed.query.end()) {
            result = {"--store", shellEscape(remoteStore->second)};
        }
    } catch (BadURL &) {
        // We just try to continue with `machine->sshName` here for backwards compat.
    }

    return result;
}

static void openConnection(::Machine::ptr machine, Path tmpDir, int stderrFD, SSHMaster::Connection & child)
{
    std::string pgmName;
    Pipe to, from;
    to.create();
    from.create();

    Strings argv;
    if (machine->isLocalhost()) {
        pgmName = "nix-store";
        argv = {"nix-store", "--builders", "", "--serve", "--write"};
    } else {
        pgmName = "ssh";
        auto sshName = machine->sshName;
        Strings extraArgs = extraStoreArgs(sshName);
        argv = {"ssh", sshName};
        if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
        if (machine->sshPublicHostKey != "") {
            Path fileName = tmpDir + "/host-key";
            auto p = machine->sshName.find("@");
            std::string host = p != std::string::npos ? std::string(machine->sshName, p + 1) : machine->sshName;
            writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
            append(argv, {"-oUserKnownHostsFile=" + fileName});
        }
        append(argv,
            { "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
            , "--", "nix-store", "--serve", "--write" });
        append(argv, extraArgs);
    }

    child.sshPid = startProcess([&]() {
        restoreProcessContext();

        if (dup2(to.readSide.get(), STDIN_FILENO) == -1)
            throw SysError("cannot dup input pipe to stdin");

        if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1)
            throw SysError("cannot dup output pipe to stdout");

        if (dup2(stderrFD, STDERR_FILENO) == -1)
            throw SysError("cannot dup stderr");

        execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast

        throw SysError("cannot start %s", pgmName);
    });

    to.readSide = -1;
    from.writeSide = -1;

    child.in = to.writeSide.release();
    child.out = from.readSide.release();
}


static void copyClosureTo(
    ::Machine::Connection & conn,
    Store & destStore,
    const StorePathSet & paths,
    SubstituteFlag useSubstitutes = NoSubstitute)
{
    StorePathSet closure;
    destStore.computeFSClosure(paths, closure);

    /* Send the "query valid paths" command with the "lock" option
       enabled. This prevents a race where the remote host
       garbage-collects paths that are already there. Optionally, ask
       the remote host to substitute missing paths. */
    // FIXME: substitute output pollutes our build log
    conn.to << ServeProto::Command::QueryValidPaths << 1 << useSubstitutes;
    ServeProto::write(destStore, conn, closure);
    conn.to.flush();

    /* Get back the set of paths that are already valid on the remote
       host. */
    auto present = ServeProto::Serialise<StorePathSet>::read(destStore, conn);

    if (present.size() == closure.size()) return;

    auto sorted = destStore.topoSortPaths(closure);

    StorePathSet missing;
    for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)
        if (!present.count(*i)) missing.insert(*i);

    printMsg(lvlDebug, "sending %d missing paths", missing.size());

    std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
        std::chrono::seconds(600));

    conn.to << ServeProto::Command::ImportPaths;
    destStore.exportPaths(missing, conn.to);
    conn.to.flush();

    if (readInt(conn.from) != 1)
        throw Error("remote machine failed to import closure");
}


// FIXME: use Store::topoSortPaths().
static StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths)
{
    StorePaths sorted;
    StorePathSet visited;

    std::function<void(const StorePath & path)> dfsVisit;

    dfsVisit = [&](const StorePath & path) {
        if (!visited.insert(path).second) return;

        auto info = paths.find(path);
        auto references = info == paths.end() ? StorePathSet() : info->second.references;

        for (auto & i : references)
            /* Don't traverse into paths that don't exist. That can
               happen due to substitutes for non-existent paths. */
            if (i != path && paths.count(i))
                dfsVisit(i);

        sorted.push_back(path);
    };

    for (auto & i : paths)
        dfsVisit(i.first);

    return sorted;
}

-static std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
+static Path createLogFileDir(const std::string & logDir, const StorePath & drvPath)
 {
     std::string base(drvPath.to_string());
     auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);

     createDirs(dirOf(logFile));

-    AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
-    if (!logFD) throw SysError("creating log file ‘%s’", logFile);
-
-    return {std::move(logFile), std::move(logFD)};
+    return logFile;
 }


/**
 * @param conn is not fully initialized; it is this function's job to set
 * the `remoteVersion` field after the handshake is completed.
 * Therefore, no `ServeProto::Serialise` functions can be used until
 * that field is set.
 */
static void handshake(::Machine::Connection & conn, unsigned int repeats)
{
    conn.to << SERVE_MAGIC_1 << 0x206;
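    // 0x206 requests serve protocol version 2.6: the major version sits in
    // the high byte and the minor in the low byte, matching the
    // GET_PROTOCOL_MAJOR/MINOR checks below.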
    conn.to.flush();

    unsigned int magic = readInt(conn.from);
    if (magic != SERVE_MAGIC_2)
        throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%1%’", conn.machine->sshName);
    conn.remoteVersion = readInt(conn.from);
    // Now `conn` is initialized.
    if (GET_PROTOCOL_MAJOR(conn.remoteVersion) != 0x200)
        throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%1%’", conn.machine->sshName);
    if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 3 && repeats > 0)
        throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", conn.machine->sshName);
}

static BasicDerivation sendInputs(
    State & state,
    Step & step,
    Store & localStore,
    Store & destStore,
    ::Machine::Connection & conn,
    unsigned int & overhead,
    counter & nrStepsWaiting,
    counter & nrStepsCopyingTo
)
{
    /* Replace the input derivations by their output paths to send a
       minimal closure to the builder.

       `tryResolve` currently does *not* rewrite input addresses, so it
       is safe to do this in all cases. (It should probably have a mode
       to do that, however, but we would not use it here.)
    */
    BasicDerivation basicDrv = ({
        auto maybeBasicDrv = step.drv->tryResolve(destStore, &localStore);
        if (!maybeBasicDrv)
            throw Error(
                "the derivation '%s' can’t be resolved. It’s probably "
                "missing some outputs",
                localStore.printStorePath(step.drvPath));
        *maybeBasicDrv;
    });

    /* Ensure that the inputs exist in the destination store. This is
       a no-op for regular stores, but for the binary cache store,
       this will copy the inputs to the binary cache from the local
       store. */
    if (&localStore != &destStore) {
        copyClosure(localStore, destStore,
            step.drv->inputSrcs,
            NoRepair, NoCheckSigs, NoSubstitute);
    }

    {
        auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
        mc1.reset();
        MaintainCount<counter> mc2(nrStepsCopyingTo);

        printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
            localStore.printStorePath(step.drvPath), conn.machine->sshName);

        auto now1 = std::chrono::steady_clock::now();

        /* Copy the input closure. */
        if (conn.machine->isLocalhost()) {
            StorePathSet closure;
            destStore.computeFSClosure(basicDrv.inputSrcs, closure);
            copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
        } else {
            copyClosureTo(conn, destStore, basicDrv.inputSrcs, Substitute);
        }

        auto now2 = std::chrono::steady_clock::now();

        overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
    }

    return basicDrv;
}

static BuildResult performBuild(
    ::Machine::Connection & conn,
    Store & localStore,
    StorePath drvPath,
    const BasicDerivation & drv,
    const State::BuildOptions & options,
    counter & nrStepsBuilding
)
{
    conn.to << ServeProto::Command::BuildDerivation << localStore.printStorePath(drvPath);
    writeDerivation(conn.to, localStore, drv);
    conn.to << options.maxSilentTime << options.buildTimeout;
    if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 2)
        conn.to << options.maxLogSize;
    if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3) {
        conn.to
            << options.repeats // == build-repeat
            << options.enforceDeterminism;
    }
    conn.to.flush();

    BuildResult result;

    time_t startTime, stopTime;

    startTime = time(0);
    {
        MaintainCount<counter> mc(nrStepsBuilding);
        result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
    }
    stopTime = time(0);

    if (!result.startTime) {
        // If the builder gave `startTime = 0`, use our measurements
        // instead of the builder's.
        //
        // Note: this represents the duration of a single round, rather
        // than all rounds.
        result.startTime = startTime;
        result.stopTime = stopTime;
    }

    // If the protocol was too old to give us `builtOutputs`, initialize
    // it manually by introspecting the derivation.
    if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
    {
        // If the remote is too old to handle CA derivations, we can’t get this
        // far anyways.
        assert(drv.type().hasKnownOutputPaths());
        DerivationOutputsAndOptPaths drvOutputs = drv.outputsAndOptPaths(localStore);
        // Since this is a `BasicDerivation`, `staticOutputHashes` will not
        // do any real work.
        auto outputHashes = staticOutputHashes(localStore, drv);
        for (auto & [outputName, output] : drvOutputs) {
            auto outputPath = output.second;
            // We’ve just asserted that the output paths of the derivation
            // were known.
            assert(outputPath);
            auto outputHash = outputHashes.at(outputName);
            auto drvOutput = DrvOutput { outputHash, outputName };
            result.builtOutputs.insert_or_assign(
                std::move(outputName),
                Realisation { drvOutput, *outputPath });
        }
    }

    return result;
}

static std::map<StorePath, ValidPathInfo> queryPathInfos(
    ::Machine::Connection & conn,
    Store & localStore,
    StorePathSet & outputs,
    size_t & totalNarSize
)
{
    /* Get info about each output path. */
    std::map<StorePath, ValidPathInfo> infos;
    conn.to << ServeProto::Command::QueryPathInfos;
    ServeProto::write(localStore, conn, outputs);
    conn.to.flush();
    while (true) {
        auto storePathS = readString(conn.from);
        if (storePathS == "") break;
        auto deriver = readString(conn.from); // deriver
        auto references = ServeProto::Serialise<StorePathSet>::read(localStore, conn);
        readLongLong(conn.from); // download size
        auto narSize = readLongLong(conn.from);
        auto narHash = Hash::parseAny(readString(conn.from), htSHA256);
        auto ca = ContentAddress::parseOpt(readString(conn.from));
        readStrings<StringSet>(conn.from); // sigs
        ValidPathInfo info(localStore.parseStorePath(storePathS), narHash);
        assert(outputs.count(info.path));
        info.references = references;
        info.narSize = narSize;
        totalNarSize += info.narSize;
        info.narHash = narHash;
        info.ca = ca;
        if (deriver != "")
            info.deriver = localStore.parseStorePath(deriver);
        infos.insert_or_assign(info.path, info);
    }

    return infos;
}

static void copyPathFromRemote(
    ::Machine::Connection & conn,
    NarMemberDatas & narMembers,
    Store & localStore,
    Store & destStore,
    const ValidPathInfo & info
)
{
    /* Receive the NAR from the remote and add it to the
       destination store. Meanwhile, extract all the info from the
       NAR that getBuildOutput() needs. */
    auto source2 = sinkToSource([&](Sink & sink)
    {
        /* Note: we should only send the command to dump the store
           path to the remote if the NAR is actually going to get read
           by the destination store, which won't happen if this path
           is already valid on the destination store. Since this
           lambda function only gets executed if someone tries to read
           from source2, we will send the command from here rather
           than outside the lambda. */
        conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
        conn.to.flush();

        TeeSource tee(conn.from, sink);
        extractNarData(tee, localStore.printStorePath(info.path), narMembers);
    });

    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
}

static void copyPathsFromRemote(
    ::Machine::Connection & conn,
    NarMemberDatas & narMembers,
    Store & localStore,
    Store & destStore,
    const std::map<StorePath, ValidPathInfo> & infos
)
{
    auto pathsSorted = reverseTopoSortPaths(infos);

    for (auto & path : pathsSorted) {
        auto & info = infos.find(path)->second;
        copyPathFromRemote(conn, narMembers, localStore, destStore, info);
    }
}

}
@@ -436,11 +30,14 @@ static void copyPathsFromRemote

 void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
 {
-    startTime = buildResult.startTime;
-    stopTime = buildResult.stopTime;
     // FIXME: make RemoteResult inherit BuildResult.
     timesBuilt = buildResult.timesBuilt;
     errorMsg = buildResult.errorMsg;
     isNonDeterministic = buildResult.isNonDeterministic;
+    if (buildResult.startTime && buildResult.stopTime) {
+        startTime = buildResult.startTime;
+        stopTime = buildResult.stopTime;
+    }

     switch ((BuildResult::Status) buildResult.status) {
         case BuildResult::Built:
@@ -492,32 +89,51 @@ void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)

 void State::buildRemote(ref<Store> destStore,
     ::Machine::ptr machine, Step::ptr step,
-    const BuildOptions & buildOptions,
+    const ServeProto::BuildOptions & buildOptions,
     RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
     std::function<void(StepState)> updateStep,
     NarMemberDatas & narMembers)
 {
     assert(BuildResult::TimedOut == 8);

-    auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath);
-    AutoDelete logFileDel(logFile, false);
-    result.logFile = logFile;
-
-    nix::Path tmpDir = createTempDir();
-    AutoDelete tmpDirDel(tmpDir, true);
+    result.logFile = build_remote::createLogFileDir(logDir, step->drvPath);

     try {

-        updateStep(ssConnecting);
+        updateStep(ssBuilding);
         result.startTime = time(0);

-        // FIXME: rewrite to use Store.
-        SSHMaster::Connection child;
-        build_remote::openConnection(machine, tmpDir, logFD.get(), child);
+        auto buildStoreUrl = machine->completeStoreReference().render();

+        Strings args = {
+            localStore->printStorePath(step->drvPath),
+            "--store", destStore->getUri(),
+            "--eval-store", localStore->getUri(),
+            "--build-store", buildStoreUrl,
+            "--max-silent-time", std::to_string(buildOptions.maxSilentTime),
+            "--timeout", std::to_string(buildOptions.buildTimeout),
+            "--max-build-log-size", std::to_string(buildOptions.maxLogSize),
+            "--max-output-size", std::to_string(maxOutputSize),
+            "--repeat", std::to_string(buildOptions.nrRepeats),
+            "--log-file", result.logFile,
+            // FIXME: step->isDeterministic
+        };

+        // FIXME: set pid for cancellation

+        auto [status, childStdout] = [&]() {
+            MaintainCount<counter> mc(nrStepsBuilding);
+            return runProgram({
+                .program = "hydra-build-step",
+                .args = std::move(args),
+            });
+        }();
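        // hydra-build-step writes the build log to --log-file; its stdout
        // carries the wire-format result that is decoded further below.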

#if 0
        {
            auto activeStepState(activeStep->state_.lock());
            if (activeStepState->cancelled) throw Error("step cancelled");
-           activeStepState->pid = child.sshPid;
+           activeStepState->pid = conn.store->getConnectionPid();
        }

        Finally clearPid([&]() {
@@ -531,66 +147,32 @@ void State::buildRemote(ref<Store> destStore,
               possibility that we end up killing another
               process. Meh. */
        });
#endif

-       ::Machine::Connection conn {
-           .from = child.out.get(),
-           .to = child.in.get(),
-           .machine = machine,
-       };
+       result.stopTime = time(0);

-       Finally updateStats([&]() {
-           bytesReceived += conn.from.read;
-           bytesSent += conn.to.written;
-       });

-       try {
-           build_remote::handshake(conn, buildOptions.repeats);
-       } catch (EndOfFile & e) {
-           child.sshPid.wait();
-           std::string s = chomp(readFile(result.logFile));
-           throw Error("cannot connect to ‘%1%’: %2%", machine->sshName, s);
-       }
+       if (!statusOk(status))
+           throw ExecError(status, fmt("hydra-build-step %s with output:\n%s", statusToString(status), childStdout));

        /* The build was executed successfully, so clear the failure
           count for this machine. */
        {
            auto info(machine->state->connectInfo.lock());
            info->consecutiveFailures = 0;
        }

-       /* Gather the inputs. If the remote side is Nix <= 1.9, we have to
-          copy the entire closure of ‘drvPath’, as well as the required
-          outputs of the input derivations. On Nix > 1.9, we only need to
-          copy the immediate sources of the derivation and the required
-          outputs of the input derivations. */
-       updateStep(ssSendingInputs);
-       BasicDerivation resolvedDrv = build_remote::sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo);
+       StringSource from { childStdout };
+       /* Read the BuildResult from the child. */
+       WorkerProto::ReadConn rconn {
+           .from = from,
+           // Hardcode latest version because we are deploying hydra
+           // itself atomically
+           .version = PROTOCOL_VERSION,
+       };
+       result.overhead += readNum<uint64_t>(rconn.from);
+       auto totalNarSize = readNum<uint64_t>(rconn.from);
+       auto buildResult = WorkerProto::Serialise<BuildResult>::read(*localStore, rconn);

-       logFileDel.cancel();

-       /* Truncate the log to get rid of messages about substitutions
-          etc. on the remote system. */
-       if (lseek(logFD.get(), SEEK_SET, 0) != 0)
-           throw SysError("seeking to the start of log file ‘%s’", result.logFile);
-
-       if (ftruncate(logFD.get(), 0) == -1)
-           throw SysError("truncating log file ‘%s’", result.logFile);
-
-       logFD = -1;
-
-       /* Do the build. */
-       printMsg(lvlDebug, "building ‘%s’ on ‘%s’",
-           localStore->printStorePath(step->drvPath),
-           machine->sshName);
-
-       updateStep(ssBuilding);
-
-       BuildResult buildResult = build_remote::performBuild(
-           conn,
-           *localStore,
-           step->drvPath,
-           resolvedDrv,
-           buildOptions,
-           nrStepsBuilding
-       );

        result.updateWithBuildResult(buildResult);

@@ -598,64 +180,22 @@ void State::buildRemote(ref<Store> destStore,

        result.errorMsg = "";

+       /* If the NAR size limit was exceeded, then hydra-build-step
+          will not have copied the output paths. */
+       if (totalNarSize > maxOutputSize) {
+           result.stepStatus = bsNarSizeLimitExceeded;
+           return;
+       }

        /* If the path was substituted or already valid, then we didn't
           get a build log. */
        if (result.isCached) {
            printMsg(lvlInfo, "outputs of ‘%s’ substituted or already valid on ‘%s’",
-               localStore->printStorePath(step->drvPath), machine->sshName);
+               localStore->printStorePath(step->drvPath), machine->storeUri.render());
            unlink(result.logFile.c_str());
            result.logFile = "";
        }

        StorePathSet outputs;
        for (auto & [_, realisation] : buildResult.builtOutputs)
            outputs.insert(realisation.outPath);

        /* Copy the output paths. */
        if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
            updateStep(ssReceivingOutputs);

            MaintainCount<counter> mc(nrStepsCopyingFrom);

            auto now1 = std::chrono::steady_clock::now();

            size_t totalNarSize = 0;
            auto infos = build_remote::queryPathInfos(conn, *localStore, outputs, totalNarSize);

            if (totalNarSize > maxOutputSize) {
                result.stepStatus = bsNarSizeLimitExceeded;
                return;
            }

            /* Copy each path. */
            printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)",
                localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);

            build_remote::copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos);
            auto now2 = std::chrono::steady_clock::now();

            result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
        }

        /* Register the outputs of the newly built drv */
        if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
            auto outputHashes = staticOutputHashes(*localStore, *step->drv);
            for (auto & [outputName, realisation] : buildResult.builtOutputs) {
                // Register the resolved drv output
                destStore->registerDrvOutput(realisation);

                // Also register the unresolved one
                auto unresolvedRealisation = realisation;
                unresolvedRealisation.signatures.clear();
                unresolvedRealisation.id.drvHash = outputHashes.at(outputName);
                destStore->registerDrvOutput(unresolvedRealisation);
            }
        }

-       /* Shut down the connection. */
-       child.in = -1;
-       child.sshPid.wait();

    } catch (Error & e) {
        /* Disable this machine until a certain period of time has
           passed. This period increases on every consecutive
@@ -668,7 +208,7 @@ void State::buildRemote(ref<Store> destStore,
            info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4);
            info->lastFailure = now;
            int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30);
-           printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->sshName, delta);
+           printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->storeUri.render(), delta);
            info->disabledUntil = now + std::chrono::seconds(delta);
        }
        throw;

@@ -3,6 +3,7 @@
 #include "state.hh"
 #include "hydra-build-result.hh"
 #include "finally.hh"
+#include "terminal.hh"
 #include "binary-cache-store.hh"

 using namespace nix;
@@ -41,7 +42,7 @@ void State::builder(MachineReservation::ptr reservation)
     } catch (std::exception & e) {
         printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s",
             localStore->printStorePath(reservation->step->drvPath),
-            reservation->machine->sshName,
+            reservation->machine->storeUri.render(),
             e.what());
     }
 }
@@ -98,10 +99,13 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
        it). */
     BuildID buildId;
     std::optional<StorePath> buildDrvPath;
-    BuildOptions buildOptions;
-    buildOptions.repeats = step->isDeterministic ? 1 : 0;
-    buildOptions.maxLogSize = maxLogSize;
-    buildOptions.enforceDeterminism = step->isDeterministic;
+    // Other fields set below
+    nix::ServeProto::BuildOptions buildOptions {
+        .maxLogSize = maxLogSize,
+        .nrRepeats = step->isDeterministic ? 1u : 0u,
+        .enforceDeterminism = step->isDeterministic,
+        .keepFailed = false,
+    };

     auto conn(dbPool.get());

@@ -136,7 +140,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
     {
         auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName));
         if (i != jobsetRepeats.end())
-            buildOptions.repeats = std::max(buildOptions.repeats, i->second);
+            buildOptions.nrRepeats = std::max(buildOptions.nrRepeats, i->second);
     }
 }
 if (!build) build = *dependents.begin();
@@ -147,7 +151,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
     buildOptions.buildTimeout = build->buildTimeout;

     printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)",
-        localStore->printStorePath(step->drvPath), buildOptions.repeats + 1, machine->sshName, buildId, (dependents.size() - 1));
+        localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->storeUri.render(), buildId, (dependents.size() - 1));
 }

 if (!buildOneDone)
@@ -175,7 +179,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
             unlink(result.logFile.c_str());
         }
     } catch (...) {
-        ignoreException();
+        ignoreExceptionInDestructor();
     }
 }
 });
@@ -193,7 +197,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
 {
     auto mc = startDbUpdate();
     pqxx::work txn(*conn);
-    stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->sshName, bsBusy);
+    stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->storeUri.render(), bsBusy);
     txn.commit();
 }

@@ -216,7 +220,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
         result.canRetry = false;
     } else {
         result.stepStatus = bsAborted;
-        result.errorMsg = e.msg();
+        result.errorMsg = filterANSIEscapes(e.msg(), true);
         result.canRetry = true;
     }
 }
@@ -250,7 +254,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
 /* Finish the step in the database. */
 if (stepNr) {
     pqxx::work txn(*conn);
-    finishBuildStep(txn, result, buildId, stepNr, machine->sshName);
+    finishBuildStep(txn, result, buildId, stepNr, machine->storeUri.render());
     txn.commit();
 }

@@ -258,7 +262,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
|
||||
issue). Retry a number of times. */
|
||||
if (result.canRetry) {
|
||||
printMsg(lvlError, "possibly transient failure building ‘%s’ on ‘%s’: %s",
|
||||
localStore->printStorePath(step->drvPath), machine->sshName, result.errorMsg);
|
||||
localStore->printStorePath(step->drvPath), machine->storeUri.render(), result.errorMsg);
|
||||
assert(stepNr);
|
||||
bool retry;
|
||||
{
|
||||
@@ -449,7 +453,7 @@ void State::failStep(
|
||||
build->finishedInDB)
|
||||
continue;
|
||||
createBuildStep(txn,
|
||||
0, build->id, step, machine ? machine->sshName : "",
|
||||
0, build->id, step, machine ? machine->storeUri.render() : "",
|
||||
result.stepStatus, result.errorMsg, buildId == build->id ? 0 : buildId);
|
||||
}
|
||||
|
||||
|
||||
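The hunks above swap Hydra's private State::BuildOptions for the options struct from Nix's serve protocol. A minimal sketch of the pattern, assuming the field set and names shown in the diff (the real nix::ServeProto::BuildOptions lives in Nix's libstore; the standalone struct here is ours):

#include <cstddef>
#include <ctime>

// Stand-in for nix::ServeProto::BuildOptions; field layout assumed
// from the hunk above.
struct BuildOptions {
    time_t maxSilentTime, buildTimeout;
    size_t maxLogSize;
    size_t nrRepeats;
    bool enforceDeterminism;
    bool keepFailed;
};

BuildOptions makeOptions(bool isDeterministic, size_t maxLogSize)
{
    // Designated initializers cover the protocol-level fields;
    // skipped members are value-initialized and, as in the diff
    // ("Other fields set below"), assigned once the responsible
    // build is known.
    BuildOptions opts {
        .maxLogSize = maxLogSize,
        .nrRepeats = isDeterministic ? 1u : 0u,
        .enforceDeterminism = isDeterministic,
        .keepFailed = false,
    };
    opts.maxSilentTime = 3600;  // would come from the build record
    opts.buildTimeout = 36000;  // likewise
    return opts;
}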
@@ -2,6 +2,7 @@

 #include <cmath>
 #include <thread>
+#include <unordered_map>
 #include <unordered_set>

 #include "state.hh"

@@ -231,11 +232,11 @@ system_time State::doDispatch()
         sort(machinesSorted.begin(), machinesSorted.end(),
             [](const MachineInfo & a, const MachineInfo & b) -> bool
             {
-                float ta = std::round(a.currentJobs / a.machine->speedFactorFloat);
-                float tb = std::round(b.currentJobs / b.machine->speedFactorFloat);
+                float ta = std::round(a.currentJobs / a.machine->speedFactor);
+                float tb = std::round(b.currentJobs / b.machine->speedFactor);
                 return
                     ta != tb ? ta < tb :
-                    a.machine->speedFactorFloat != b.machine->speedFactorFloat ? a.machine->speedFactorFloat > b.machine->speedFactorFloat :
+                    a.machine->speedFactor != b.machine->speedFactor ? a.machine->speedFactor > b.machine->speedFactor :
                     a.currentJobs > b.currentJobs;
             });

@@ -255,7 +256,7 @@ system_time State::doDispatch()
             /* Can this machine do this step? */
             if (!mi.machine->supportsStep(step)) {
                 debug("machine '%s' does not support step '%s' (system type '%s')",
-                    mi.machine->sshName, localStore->printStorePath(step->drvPath), step->drv->platform);
+                    mi.machine->storeUri.render(), localStore->printStorePath(step->drvPath), step->drv->platform);
                 continue;
             }

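The dispatcher now divides by the float speedFactor directly instead of the interim speedFactorFloat shim. A toy example of the resulting order (types are ours, not Hydra's): rounding jobs-per-speed first groups machines into coarse load buckets; within a bucket the faster machine wins, then, as in the hunk above, the one with more jobs in flight:

#include <algorithm>
#include <cmath>
#include <vector>

struct Mach { unsigned currentJobs; float speedFactor; };

// Mirrors the comparator above: a 2x-speed machine running 2 jobs
// ties with a 1x machine running 1 job.
void sortByLoad(std::vector<Mach> & ms)
{
    std::sort(ms.begin(), ms.end(), [](const Mach & a, const Mach & b) {
        float ta = std::round(a.currentJobs / a.speedFactor);
        float tb = std::round(b.currentJobs / b.speedFactor);
        return
            ta != tb ? ta < tb :
            a.speedFactor != b.speedFactor ? a.speedFactor > b.speedFactor :
            a.currentJobs > b.currentJobs;
    });
}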
@@ -135,67 +135,26 @@ void State::parseMachines(const std::string & contents)
         oldMachines = *machines_;
     }

-    for (auto line : tokenizeString<Strings>(contents, "\n")) {
-        line = trim(std::string(line, 0, line.find('#')));
-        auto tokens = tokenizeString<std::vector<std::string>>(line);
-        if (tokens.size() < 3) continue;
-        tokens.resize(8);
-
-        if (tokens[5] == "-") tokens[5] = "";
-        auto supportedFeatures = tokenizeString<StringSet>(tokens[5], ",");
-
-        if (tokens[6] == "-") tokens[6] = "";
-        auto mandatoryFeatures = tokenizeString<StringSet>(tokens[6], ",");
-
-        for (auto & f : mandatoryFeatures)
-            supportedFeatures.insert(f);
-
-        using MaxJobs = std::remove_const<decltype(nix::Machine::maxJobs)>::type;
-
-        auto machine = std::make_shared<::Machine>(nix::Machine {
-            // `storeUri`, not yet used
-            "",
-            // `systemTypes`, not yet used
-            {},
-            // `sshKey`
-            tokens[2] == "-" ? "" : tokens[2],
-            // `maxJobs`
-            tokens[3] != ""
-                ? string2Int<MaxJobs>(tokens[3]).value()
-                : 1,
-            // `speedFactor`, not yet used
-            1,
-            // `supportedFeatures`
-            std::move(supportedFeatures),
-            // `mandatoryFeatures`
-            std::move(mandatoryFeatures),
-            // `sshPublicHostKey`
-            tokens[7] != "" && tokens[7] != "-"
-                ? base64Decode(tokens[7])
-                : "",
-        });
-
-        machine->sshName = tokens[0];
-        machine->systemTypesSet = tokenizeString<StringSet>(tokens[1], ",");
-        machine->speedFactorFloat = atof(tokens[4].c_str());
+    for (auto && machine_ : nix::Machine::parseConfig({}, contents)) {
+        auto machine = std::make_shared<::Machine>(std::move(machine_));

         /* Re-use the State object of the previous machine with the
            same name. */
-        auto i = oldMachines.find(machine->sshName);
+        auto i = oldMachines.find(machine->storeUri.variant);
         if (i == oldMachines.end())
-            printMsg(lvlChatty, "adding new machine ‘%1%’", machine->sshName);
+            printMsg(lvlChatty, "adding new machine ‘%1%’", machine->storeUri.render());
         else
-            printMsg(lvlChatty, "updating machine ‘%1%’", machine->sshName);
+            printMsg(lvlChatty, "updating machine ‘%1%’", machine->storeUri.render());
         machine->state = i == oldMachines.end()
             ? std::make_shared<::Machine::State>()
             : i->second->state;
-        newMachines[machine->sshName] = machine;
+        newMachines[machine->storeUri.variant] = machine;
     }

     for (auto & m : oldMachines)
         if (newMachines.find(m.first) == newMachines.end()) {
             if (m.second->enabled)
-                printInfo("removing machine ‘%1%’", m.first);
+                printInfo("removing machine ‘%1%’", m.second->storeUri.render());
             /* Add a disabled ::Machine object to make sure stats are
                maintained. */
             auto machine = std::make_shared<::Machine>(*(m.second));
@@ -591,12 +550,11 @@ void State::dumpStatus(Connection & conn)
         {"nrQueuedBuilds", builds.lock()->size()},
         {"nrActiveSteps", activeSteps_.lock()->size()},
         {"nrStepsBuilding", nrStepsBuilding.load()},
+#if 0
         {"nrStepsCopyingTo", nrStepsCopyingTo.load()},
         {"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
         {"nrStepsWaiting", nrStepsWaiting.load()},
+#endif
         {"nrUnsupportedSteps", nrUnsupportedSteps.load()},
-        {"bytesSent", bytesSent.load()},
-        {"bytesReceived", bytesReceived.load()},
         {"nrBuildsRead", nrBuildsRead.load()},
         {"buildReadTimeMs", buildReadTimeMs.load()},
         {"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
@@ -641,7 +599,7 @@ void State::dumpStatus(Connection & conn)

             json machine = {
                 {"enabled", m->enabled},
-                {"systemTypes", m->systemTypesSet},
+                {"systemTypes", m->systemTypes},
                 {"supportedFeatures", m->supportedFeatures},
                 {"mandatoryFeatures", m->mandatoryFeatures},
                 {"nrStepsDone", s->nrStepsDone.load()},
@@ -659,7 +617,7 @@ void State::dumpStatus(Connection & conn)
                 machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
                 machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
             }
-            statusJson["machines"][m->sshName] = machine;
+            statusJson["machines"][m->storeUri.render()] = machine;
         }
     }

@@ -928,10 +886,17 @@ void State::run(BuildID buildOne)
         while (true) {
             try {
                 auto conn(dbPool.get());
-                receiver dumpStatus_(*conn, "dump_status");
-                while (true) {
-                    conn->await_notification();
-                    dumpStatus(*conn);
+                try {
+                    receiver dumpStatus_(*conn, "dump_status");
+                    while (true) {
+                        conn->await_notification();
+                        dumpStatus(*conn);
+                    }
+                } catch (pqxx::broken_connection & connEx) {
+                    printMsg(lvlError, "main thread: %s", connEx.what());
+                    printMsg(lvlError, "main thread: Reconnecting in 10s");
+                    conn.markBad();
+                    sleep(10);
                 }
             } catch (std::exception & e) {
                 printMsg(lvlError, "main thread: %s", e.what());

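The forty-line hand-rolled tokenizer above collapses into a single call to nix::Machine::parseConfig, the entry point this diff itself introduces. A sketch of the new parsing path, assuming the Nix 2.20-era API (the header name is a guess):

#include <iostream>
#include <string>

#include "machines.hh" // nix::Machine; header name assumed

// Each machines-file line is
//   URI SYSTEMS SSHKEY MAXJOBS SPEED SUPPORTED MANDATORY PUBHOSTKEY
// with "-" for empty fields; parseConfig now owns that tokenization,
// including the base64 decoding of the host key.
void listMachines(const std::string & contents)
{
    for (auto && m : nix::Machine::parseConfig({}, contents))
        std::cout << m.storeUri.render() << "\n";
}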
src/hydra-queue-runner/meson.build (new file)
@@ -0,0 +1,22 @@
+srcs = files(
+  'builder.cc',
+  'build-remote.cc',
+  'build-result.cc',
+  'dispatcher.cc',
+  'hydra-queue-runner.cc',
+  'nar-extractor.cc',
+  'queue-monitor.cc',
+)
+
+hydra_queue_runner = executable('hydra-queue-runner',
+  'hydra-queue-runner.cc',
+  srcs,
+  dependencies: [
+    libhydra_dep,
+    nix_dep,
+    pqxx_dep,
+    prom_cpp_core_dep,
+    prom_cpp_pull_dep,
+  ],
+  install: true,
+)

@@ -6,7 +6,46 @@

 using namespace nix;

-struct Extractor : ParseSink
+struct NarMemberConstructor : CreateRegularFileSink
+{
+    NarMemberData & curMember;
+
+    HashSink hashSink = HashSink { HashAlgorithm::SHA256 };
+
+    std::optional<uint64_t> expectedSize;
+
+    NarMemberConstructor(NarMemberData & curMember)
+        : curMember(curMember)
+    { }
+
+    void isExecutable() override
+    {
+    }
+
+    void preallocateContents(uint64_t size) override
+    {
+        expectedSize = size;
+    }
+
+    void operator () (std::string_view data) override
+    {
+        assert(expectedSize);
+        *curMember.fileSize += data.size();
+        hashSink(data);
+        if (curMember.contents) {
+            curMember.contents->append(data);
+        }
+        assert(curMember.fileSize <= expectedSize);
+        if (curMember.fileSize == expectedSize) {
+            auto [hash, len] = hashSink.finish();
+            assert(curMember.fileSize == len);
+            curMember.sha256 = hash;
+        }
+    }
+};
+
+struct Extractor : FileSystemObjectSink
 {
     std::unordered_set<Path> filesToKeep {
         "/nix-support/hydra-build-products",
@@ -15,65 +54,41 @@ struct Extractor : ParseSink
     };

     NarMemberDatas & members;
-    NarMemberData * curMember = nullptr;
-    Path prefix;
+    std::filesystem::path prefix;
+
+    Path toKey(const CanonPath & path)
+    {
+        std::filesystem::path p = prefix;
+        // Conditional to avoid trailing slash
+        if (!path.isRoot()) p /= path.rel();
+        return p;
+    }

     Extractor(NarMemberDatas & members, const Path & prefix)
         : members(members), prefix(prefix)
     { }

-    void createDirectory(const Path & path) override
+    void createDirectory(const CanonPath & path) override
     {
-        members.insert_or_assign(prefix + path, NarMemberData { .type = SourceAccessor::Type::tDirectory });
+        members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tDirectory });
     }

-    void createRegularFile(const Path & path) override
+    void createRegularFile(const CanonPath & path, std::function<void(CreateRegularFileSink &)> func) override
     {
-        curMember = &members.insert_or_assign(prefix + path, NarMemberData {
-            .type = SourceAccessor::Type::tRegular,
-            .fileSize = 0,
-            .contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
-        }).first->second;
+        NarMemberConstructor nmc {
+            members.insert_or_assign(toKey(path), NarMemberData {
+                .type = SourceAccessor::Type::tRegular,
+                .fileSize = 0,
+                .contents = filesToKeep.count(path.abs()) ? std::optional("") : std::nullopt,
+            }).first->second,
+        };
+        func(nmc);
     }

-    std::optional<uint64_t> expectedSize;
-    std::unique_ptr<HashSink> hashSink;
-
-    void preallocateContents(uint64_t size) override
+    void createSymlink(const CanonPath & path, const std::string & target) override
     {
-        expectedSize = size;
-        hashSink = std::make_unique<HashSink>(htSHA256);
+        members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tSymlink });
     }
-
-    void receiveContents(std::string_view data) override
-    {
-        assert(expectedSize);
-        assert(curMember);
-        assert(hashSink);
-        *curMember->fileSize += data.size();
-        (*hashSink)(data);
-        if (curMember->contents) {
-            curMember->contents->append(data);
-        }
-        assert(curMember->fileSize <= expectedSize);
-        if (curMember->fileSize == expectedSize) {
-            auto [hash, len] = hashSink->finish();
-            assert(curMember->fileSize == len);
-            curMember->sha256 = hash;
-            hashSink.reset();
-        }
-    }
-
-    void createSymlink(const Path & path, const std::string & target) override
-    {
-        members.insert_or_assign(prefix + path, NarMemberData { .type = SourceAccessor::Type::tSymlink });
-    }
-
-    void isExecutable() override
-    { }
-
-    void closeRegularFile() override
-    { }
 };

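The rewritten extractor follows the FileSystemObjectSink interface from newer Nix, where createRegularFile hands a per-file sink to a callback instead of mutating a curMember pointer between calls. A self-contained sketch of that callback shape (minimal types of our own, not Nix's headers):

#include <functional>
#include <iostream>
#include <string>
#include <string_view>

// Minimal stand-in for nix::CreateRegularFileSink.
struct RegularFileSink {
    virtual void operator () (std::string_view data) = 0;
    virtual ~RegularFileSink() = default;
};

// Shape of the new createRegularFile: per-file state (hash, expected
// size) lives in a sink whose lifetime is exactly the callback
// invocation, so nothing like the old expectedSize/hashSink members
// can leak from one file into the next.
void createRegularFile(
    const std::string & path,
    std::function<void(RegularFileSink &)> fun)
{
    struct Printer : RegularFileSink {
        void operator () (std::string_view data) override
        { std::cout << data; }
    } sink;
    fun(sink); // contents are streamed only inside this call
}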
@@ -10,8 +10,14 @@ using namespace nix;
 void State::queueMonitor()
 {
     while (true) {
+        auto conn(dbPool.get());
         try {
-            queueMonitorLoop();
+            queueMonitorLoop(*conn);
+        } catch (pqxx::broken_connection & e) {
+            printMsg(lvlError, "queue monitor: %s", e.what());
+            printMsg(lvlError, "queue monitor: Reconnecting in 10s");
+            conn.markBad();
+            sleep(10);
         } catch (std::exception & e) {
             printError("queue monitor: %s", e.what());
             sleep(10); // probably a DB problem, so don't retry right away
@@ -20,16 +26,14 @@ void State::queueMonitor()
 }


-void State::queueMonitorLoop()
+void State::queueMonitorLoop(Connection & conn)
 {
-    auto conn(dbPool.get());
-
-    receiver buildsAdded(*conn, "builds_added");
-    receiver buildsRestarted(*conn, "builds_restarted");
-    receiver buildsCancelled(*conn, "builds_cancelled");
-    receiver buildsDeleted(*conn, "builds_deleted");
-    receiver buildsBumped(*conn, "builds_bumped");
-    receiver jobsetSharesChanged(*conn, "jobset_shares_changed");
+    receiver buildsAdded(conn, "builds_added");
+    receiver buildsRestarted(conn, "builds_restarted");
+    receiver buildsCancelled(conn, "builds_cancelled");
+    receiver buildsDeleted(conn, "builds_deleted");
+    receiver buildsBumped(conn, "builds_bumped");
+    receiver jobsetSharesChanged(conn, "jobset_shares_changed");

     auto destStore = getDestStore();

@@ -39,17 +43,17 @@ void State::queueMonitorLoop(Connection & conn)
     while (!quit) {
         localStore->clearPathInfoCache();

-        bool done = getQueuedBuilds(*conn, destStore, lastBuildId);
+        bool done = getQueuedBuilds(conn, destStore, lastBuildId);

         if (buildOne && buildOneDone) quit = true;

         /* Sleep until we get notification from the database about an
            event. */
         if (done && !quit) {
-            conn->await_notification();
+            conn.await_notification();
             nrQueueWakeups++;
         } else
-            conn->get_notifs();
+            conn.get_notifs();

         if (auto lowestId = buildsAdded.get()) {
             lastBuildId = std::min(lastBuildId, static_cast<unsigned>(std::stoul(*lowestId) - 1));
@@ -61,11 +65,11 @@ void State::queueMonitorLoop(Connection & conn)
         }
         if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
             printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
-            processQueueChange(*conn);
+            processQueueChange(conn);
         }
         if (jobsetSharesChanged.get()) {
             printMsg(lvlTalkative, "got notification: jobset shares changed");
-            processJobsetSharesChange(*conn);
+            processJobsetSharesChange(conn);
         }
     }

@@ -294,7 +298,7 @@ bool State::getQueuedBuilds(Connection & conn,
         try {
             createBuild(build);
         } catch (Error & e) {
-            e.addTrace({}, hintfmt("while loading build %d: ", build->id));
+            e.addTrace({}, HintFmt("while loading build %d: ", build->id));
             throw;
         }

@@ -696,7 +700,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
             product.fileSize = row[2].as<off_t>();
         }
         if (!row[3].is_null())
-            product.sha256hash = Hash::parseAny(row[3].as<std::string>(), htSHA256);
+            product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashAlgorithm::SHA256);
         if (!row[4].is_null())
            product.path = row[4].as<std::string>();
         product.name = row[5].as<std::string>();

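The status thread and the queue monitor now share one reconnect idiom: pull a pooled connection outside the retry scope and mark it bad on pqxx::broken_connection so the pool replaces it rather than handing the dead handle back out. A sketch of the pattern, assuming Hydra's pool semantics shown in the hunks above (dbPool, markBad and the 10-second backoff):

#include <unistd.h>

#include <pqxx/pqxx>

template<typename Pool>
void runWithReconnect(Pool & dbPool)
{
    while (true) {
        auto conn(dbPool.get()); // RAII handle from the pool
        try {
            // ... receiver setup and await_notification() loop ...
        } catch (pqxx::broken_connection & e) {
            // Don't return the dead handle to the pool; force a
            // fresh connection on the next iteration.
            conn.markBad();
            sleep(10);
        }
    }
}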
@@ -6,7 +6,6 @@

 #include <map>
 #include <memory>
 #include <queue>
-#include <regex>

 #include <prometheus/counter.h>
 #include <prometheus/gauge.h>

@@ -239,18 +238,6 @@ struct Machine : nix::Machine
 {
     typedef std::shared_ptr<Machine> ptr;

-    /* TODO Get rid of: `nix::Machine::storeUri` is normalized in a way
-       we are not yet used to, but once we are, we don't need this. */
-    std::string sshName;
-
-    /* TODO Get rid once `nix::Machine::systemTypes` is a set not
-       vector. */
-    std::set<std::string> systemTypesSet;
-
-    /* TODO Get rid once `nix::Machine::systemTypes` is a `float` not
-       an `int`. */
-    float speedFactorFloat = 1.0;
-
     struct State {
         typedef std::shared_ptr<State> ptr;
         counter currentJobs{0};
@@ -277,7 +264,7 @@ struct Machine : nix::Machine
     {
         /* Check that this machine is of the type required by the
           step. */
-        if (!systemTypesSet.count(step->drv->platform == "builtin" ? nix::settings.thisSystem : step->drv->platform))
+        if (!systemTypes.count(step->drv->platform == "builtin" ? nix::settings.thisSystem : step->drv->platform))
            return false;

         /* Check that the step requires all mandatory features of this
@@ -300,37 +287,7 @@ struct Machine : nix::Machine
        return true;
     }

-    bool isLocalhost()
-    {
-        std::regex r("^(ssh://|ssh-ng://)?localhost$");
-        return std::regex_search(sshName, r);
-    }
-
-    // A connection to a machine
-    struct Connection {
-        nix::FdSource from;
-        nix::FdSink to;
-        nix::ServeProto::Version remoteVersion;
-
-        // Backpointer to the machine
-        ptr machine;
-
-        operator nix::ServeProto::ReadConn ()
-        {
-            return {
-                .from = from,
-                .version = remoteVersion,
-            };
-        }
-
-        operator nix::ServeProto::WriteConn ()
-        {
-            return {
-                .to = to,
-                .version = remoteVersion,
-            };
-        }
-    };
+    bool isLocalhost() const;
 };

@@ -384,7 +341,7 @@ private:

     /* The build machines. */
     std::mutex machinesReadyLock;
-    typedef std::map<std::string, Machine::ptr> Machines;
+    typedef std::map<nix::StoreReference::Variant, Machine::ptr> Machines;
     nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr

     /* Various stats. */
@@ -395,9 +352,10 @@ private:
     counter nrStepsStarted{0};
     counter nrStepsDone{0};
     counter nrStepsBuilding{0};
+#if 0
     counter nrStepsCopyingTo{0};
     counter nrStepsCopyingFrom{0};
     counter nrStepsWaiting{0};
+#endif
     counter nrUnsupportedSteps{0};
     counter nrRetries{0};
     counter maxNrRetries{0};
@@ -406,8 +364,6 @@ private:
     counter nrQueueWakeups{0};
     counter nrDispatcherWakeups{0};
     counter dispatchTimeMs{0};
-    counter bytesSent{0};
-    counter bytesReceived{0};
     counter nrActiveDbUpdates{0};

     /* Specific build to do for --build-one (testing only). */
@@ -464,7 +420,7 @@ private:

     /* How often the build steps of a jobset should be repeated in
        order to detect non-determinism. */
-    std::map<std::pair<std::string, std::string>, unsigned int> jobsetRepeats;
+    std::map<std::pair<std::string, std::string>, size_t> jobsetRepeats;

     bool uploadLogsToBinaryCache;

@@ -493,12 +449,6 @@ private:
 public:
     State(std::optional<std::string> metricsAddrOpt);

-    struct BuildOptions {
-        unsigned int maxSilentTime, buildTimeout, repeats;
-        size_t maxLogSize;
-        bool enforceDeterminism;
-    };
-
 private:

     nix::MaintainCount<counter> startDbUpdate();
@@ -531,7 +481,7 @@ private:

     void queueMonitor();

-    void queueMonitorLoop();
+    void queueMonitorLoop(Connection & conn);

     /* Check the queue for new builds. */
     bool getQueuedBuilds(Connection & conn,
@@ -583,7 +533,7 @@ private:

     void buildRemote(nix::ref<nix::Store> destStore,
         Machine::ptr machine, Step::ptr step,
-        const BuildOptions & buildOptions,
+        const nix::ServeProto::BuildOptions & buildOptions,
         RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
         std::function<void(StepState)> updateStep,
         NarMemberDatas & narMembers);

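sshName disappears because the machines map is now keyed by the parsed store reference rather than a raw string. A toy model of why that works, assuming (as the typedef above requires) that nix::StoreReference::Variant is an ordered type: two spellings that normalize to the same reference collapse into one map entry.

#include <map>
#include <memory>
#include <string>
#include <variant>

// Toy model of nix::StoreReference::Variant: a parsed store URI is
// either the special "auto" case or a scheme+authority pair, and the
// variant orders lexicographically, so it can key a std::map just
// like the old sshName string did, but without duplicate spellings.
struct Auto { auto operator<=>(const Auto &) const = default; };
struct Specified {
    std::string scheme, authority;
    auto operator<=>(const Specified &) const = default;
};
using Variant = std::variant<Auto, Specified>;

struct Machine;
std::map<Variant, std::shared_ptr<Machine>> machines;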
@@ -4,7 +4,6 @@ use strict;
 use warnings;
 use base 'Hydra::Base::Controller::REST';
 use List::SomeUtils qw(any);
-use Nix::Store;
 use Hydra::Helper::Nix;
 use Hydra::Helper::CatalystUtils;

@@ -30,7 +29,7 @@ sub getChannelData {
     my $outputs = {};
     foreach my $output (@outputs) {
         my $outPath = $output->get_column("outpath");
-        next if $checkValidity && !isValidPath($outPath);
+        next if $checkValidity && !$MACHINE_LOCAL_STORE->isValidPath($outPath);
         $outputs->{$output->get_column("outname")} = $outPath;
         push @storePaths, $outPath;
         # Put the system type in the manifest (for top-level

@@ -95,6 +95,7 @@ sub get_legacy_ldap_config {
             "hydra_bump-to-front" => [ "bump-to-front" ],
             "hydra_cancel-build" => [ "cancel-build" ],
             "hydra_create-projects" => [ "create-projects" ],
+            "hydra_eval-jobset" => [ "eval-jobset" ],
             "hydra_restart-jobs" => [ "restart-jobs" ],
         },
     };
@@ -159,6 +160,7 @@ sub valid_roles {
         "bump-to-front",
         "cancel-build",
         "create-projects",
+        "eval-jobset",
         "restart-jobs",
     ];
 }

@@ -239,6 +239,8 @@ sub triggerJobset {
 sub push : Chained('api') PathPart('push') Args(0) {
     my ($self, $c) = @_;

+    requirePost($c);
+
     $c->{stash}->{json}->{jobsetsTriggered} = [];

     my $force = exists $c->request->query_params->{force};
@@ -246,19 +248,24 @@ sub push : Chained('api') PathPart('push') Args(0) {
     foreach my $s (@jobsets) {
         my ($p, $j) = parseJobsetName($s);
         my $jobset = $c->model('DB::Jobsets')->find($p, $j);
+        requireEvalJobsetPrivileges($c, $jobset->project);
         next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
         triggerJobset($self, $c, $jobset, $force);
     }

     my @repos = split /,/, ($c->request->query_params->{repos} // "");
     foreach my $r (@repos) {
-        triggerJobset($self, $c, $_, $force) foreach $c->model('DB::Jobsets')->search(
+        my @jobsets = $c->model('DB::Jobsets')->search(
             { 'project.enabled' => 1, 'me.enabled' => 1 },
             {
                 join => 'project',
                 where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ],
                 order_by => 'me.id DESC'
             });
+        foreach my $jobset (@jobsets) {
+            requireEvalJobsetPrivileges($c, $jobset->project);
+            triggerJobset($self, $c, $jobset, $force)
+        }
     }

     $self->status_ok(
@@ -285,6 +292,23 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
     $c->response->body("");
 }

+sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) {
+    my ($self, $c) = @_;
+
+    $c->{stash}->{json}->{jobsetsTriggered} = [];
+
+    my $in = $c->request->{data};
+    my $url = $in->{repository}->{clone_url} or die;
+    $url =~ s/.git$//;
+    print STDERR "got push from Gitea repository $url\n";
+
+    triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search(
+        { 'project.enabled' => 1, 'me.enabled' => 1 },
+        { join => 'project'
+        , where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ]
+        });
+    $c->response->body("");
+}

 1;

@@ -10,11 +10,10 @@ use File::Basename;
 use File::LibMagic;
 use File::stat;
 use Data::Dump qw(dump);
-use Nix::Store;
 use Nix::Config;
 use List::SomeUtils qw(all);
 use Encode;
 use JSON::PP;
+use WWW::Form::UrlEncoded::PP qw();

 use feature 'state';

@@ -82,9 +81,9 @@ sub build_GET {
     # false because `$_->path` will be empty
     $c->stash->{available} =
         $c->stash->{isLocalStore}
-        ? all { $_->path && isValidPath($_->path) } $build->buildoutputs->all
+        ? all { $_->path && $MACHINE_LOCAL_STORE->isValidPath($_->path) } $build->buildoutputs->all
         : 1;
-    $c->stash->{drvAvailable} = isValidPath $build->drvpath;
+    $c->stash->{drvAvailable} = $MACHINE_LOCAL_STORE->isValidPath($build->drvpath);

     if ($build->finished && $build->iscachedbuild) {
         my $path = ($build->buildoutputs)[0]->path or undef;
@@ -141,7 +140,7 @@ sub view_nixlog : Chained('buildChain') PathPart('nixlog') {
     $c->stash->{step} = $step;

     my $drvPath = $step->drvpath;
-    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
+    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
     showLog($c, $mode, $log_uri);
 }

@@ -150,7 +149,7 @@ sub view_log : Chained('buildChain') PathPart('log') {
     my ($self, $c, $mode) = @_;

     my $drvPath = $c->stash->{build}->drvpath;
-    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
+    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
     showLog($c, $mode, $log_uri);
 }

@@ -235,6 +234,9 @@ sub serveFile {
     }

     elsif ($ls->{type} eq "regular") {
+        # Have the hosted data considered its own origin to avoid being a giant
+        # XSS hole.
+        $c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');

         $c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
                                                       "store", "cat", "--store", getStoreUri(), "$path"]) };
@@ -308,7 +310,7 @@ sub output : Chained('buildChain') PathPart Args(1) {
     error($c, "This build is not finished yet.") unless $build->finished;
     my $output = $build->buildoutputs->find({name => $outputName});
     notFound($c, "This build has no output named ‘$outputName’") unless defined $output;
-    gone($c, "Output is no longer available.") unless isValidPath $output->path;
+    gone($c, "Output is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($output->path);

     $c->response->header('Content-Disposition', "attachment; filename=\"build-${\$build->id}-${\$outputName}.nar.bz2\"");
     $c->stash->{current_view} = 'NixNAR';
@@ -425,7 +427,7 @@ sub getDependencyGraph {
         };
         $$done{$path} = $node;
         my @refs;
-        foreach my $ref (queryReferences($path)) {
+        foreach my $ref ($MACHINE_LOCAL_STORE->queryReferences($path)) {
             next if $ref eq $path;
             next unless $runtime || $ref =~ /\.drv$/;
             getDependencyGraph($self, $c, $runtime, $done, $ref);
@@ -433,7 +435,7 @@ sub getDependencyGraph {
         }
         # Show in reverse topological order to flatten the graph.
         # Should probably do a proper BFS.
-        my @sorted = reverse topoSortPaths(@refs);
+        my @sorted = reverse $MACHINE_LOCAL_STORE->topoSortPaths(@refs);
         $node->{refs} = [map { $$done{$_} } @sorted];
     }

@@ -446,7 +448,7 @@ sub build_deps : Chained('buildChain') PathPart('build-deps') {
     my $build = $c->stash->{build};
     my $drvPath = $build->drvpath;

-    error($c, "Derivation no longer available.") unless isValidPath $drvPath;
+    error($c, "Derivation no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($drvPath);

     $c->stash->{buildTimeGraph} = getDependencyGraph($self, $c, 0, {}, $drvPath);

@@ -461,7 +463,7 @@ sub runtime_deps : Chained('buildChain') PathPart('runtime-deps') {

     requireLocalStore($c);

-    error($c, "Build outputs no longer available.") unless all { isValidPath($_) } @outPaths;
+    error($c, "Build outputs no longer available.") unless all { $MACHINE_LOCAL_STORE->isValidPath($_) } @outPaths;

     my $done = {};
     $c->stash->{runtimeGraph} = [ map { getDependencyGraph($self, $c, 1, $done, $_) } @outPaths ];
@@ -481,7 +483,7 @@ sub nix : Chained('buildChain') PathPart('nix') CaptureArgs(0) {
     if (isLocalStore) {
         foreach my $out ($build->buildoutputs) {
             notFound($c, "Path " . $out->path . " is no longer available.")
-                unless isValidPath($out->path);
+                unless $MACHINE_LOCAL_STORE->isValidPath($out->path);
         }
     }

@@ -16,6 +16,7 @@ use List::Util qw[min max];
 use List::SomeUtils qw{any};
 use Net::Prometheus;
 use Types::Standard qw/StrMatch/;
+use WWW::Form::UrlEncoded::PP qw();

 use constant NARINFO_REGEX => qr{^([a-z0-9]{32})\.narinfo$};
 # e.g.: https://hydra.example.com/realisations/sha256:a62128132508a3a32eef651d6467695944763602f226ac630543e947d9feb140!out.doi
@@ -34,6 +35,7 @@ sub noLoginNeeded {

     return $whitelisted ||
         $c->request->path eq "api/push-github" ||
+        $c->request->path eq "api/push-gitea" ||
         $c->request->path eq "google-login" ||
         $c->request->path eq "github-redirect" ||
         $c->request->path eq "github-login" ||
@@ -49,6 +51,7 @@ sub begin :Private {
     $c->stash->{curUri} = $c->request->uri;
     $c->stash->{version} = $ENV{"HYDRA_RELEASE"} || "<devel>";
     $c->stash->{nixVersion} = $ENV{"NIX_RELEASE"} || "<devel>";
+    $c->stash->{nixEvalJobsVersion} = $ENV{"NIX_EVAL_JOBS_RELEASE"} || "<devel>";
     $c->stash->{curTime} = time;
     $c->stash->{logo} = defined $c->config->{hydra_logo} ? "/logo" : "";
     $c->stash->{tracker} = defined $c->config->{tracker} ? $c->config->{tracker} : "";
@@ -79,7 +82,7 @@ sub begin :Private {
     $_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins};

     # XSRF protection: require POST requests to have the same origin.
-    if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") {
+    if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") {
         my $referer = $c->req->header('Referer');
         $referer //= $c->req->header('Origin');
         my $base = $c->req->base;
@@ -328,7 +331,7 @@ sub nar :Local :Args(1) {
     else {
         $path = $Nix::Config::storeDir . "/$path";

-        gone($c, "Path " . $path . " is no longer available.") unless isValidPath($path);
+        gone($c, "Path " . $path . " is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($path);

         $c->stash->{current_view} = 'NixNAR';
         $c->stash->{storePath} = $path;
@@ -366,7 +369,7 @@ sub realisations :Path('realisations') :Args(StrMatch[REALISATIONS_REGEX]) {

     else {
         my ($rawDrvOutput) = $realisation =~ REALISATIONS_REGEX;
-        my $rawRealisation = queryRawRealisation($rawDrvOutput);
+        my $rawRealisation = $MACHINE_LOCAL_STORE->queryRawRealisation($rawDrvOutput);

         if (!$rawRealisation) {
             $c->response->status(404);
@@ -395,7 +398,7 @@ sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
         my ($hash) = $narinfo =~ NARINFO_REGEX;

         die("Hash length was not 32") if length($hash) != 32;
-        my $path = queryPathFromHashPart($hash);
+        my $path = $MACHINE_LOCAL_STORE->queryPathFromHashPart($hash);

         if (!$path) {
             $c->response->status(404);
@@ -553,7 +556,7 @@ sub log :Local :Args(1) {
     my $logPrefix = $c->config->{log_prefix};

     if (defined $logPrefix) {
-        $c->res->redirect($logPrefix . "log/" . basename($drvPath));
+        $c->res->redirect($logPrefix . "log/" . WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath)));
     } else {
         notFound($c, "The build log of $drvPath is not available.");
     }

@@ -15,6 +15,7 @@ our @EXPORT = qw(
     forceLogin requireUser requireProjectOwner requireRestartPrivileges requireAdmin requirePost isAdmin isProjectOwner
     requireBumpPrivileges
     requireCancelBuildPrivileges
+    requireEvalJobsetPrivileges
     trim
     getLatestFinishedEval getFirstEval
     paramToList
@@ -186,6 +187,27 @@ sub isProjectOwner {
             defined $c->model('DB::ProjectMembers')->find({ project => $project, userName => $c->user->username }));
 }

+sub hasEvalJobsetRole {
+    my ($c) = @_;
+    return $c->user_exists && $c->check_user_roles("eval-jobset");
+}
+
+sub mayEvalJobset {
+    my ($c, $project) = @_;
+    return
+        $c->user_exists &&
+        (isAdmin($c) ||
+         hasEvalJobsetRole($c) ||
+         isProjectOwner($c, $project));
+}
+
+sub requireEvalJobsetPrivileges {
+    my ($c, $project) = @_;
+    requireUser($c);
+    accessDenied($c, "Only the project members, administrators, and accounts with eval-jobset privileges can perform this operation.")
+        unless mayEvalJobset($c, $project);
+}
+
 sub hasCancelBuildRole {
     my ($c) = @_;
     return $c->user_exists && $c->check_user_roles('cancel-build');
@@ -272,7 +294,7 @@ sub requireAdmin {

 sub requirePost {
     my ($c) = @_;
-    error($c, "Request must be POSTed.") if $c->request->method ne "POST";
+    error($c, "Request must be POSTed.", 405) if $c->request->method ne "POST";
 }

@@ -40,8 +40,11 @@ our @EXPORT = qw(
     registerRoot
     restartBuilds
     run
+    $MACHINE_LOCAL_STORE
     );

+our $MACHINE_LOCAL_STORE = Nix::Store->new();
+

 sub getHydraHome {
     my $dir = $ENV{"HYDRA_HOME"} or die "The HYDRA_HOME directory does not exist!\n";
@@ -171,6 +174,9 @@ sub getDrvLogPath {
     for ($fn . $bucketed, $fn . $bucketed . ".bz2") {
         return $_ if -f $_;
     }
+    for ($fn . $bucketed, $fn . $bucketed . ".zst") {
+        return $_ if -f $_;
+    }
     return undef;
 }

@@ -187,6 +193,10 @@ sub findLog {

     return undef if scalar @outPaths == 0;

+    # Filter out any NULLs. Content-addressed derivations
+    # that haven't built yet or failed to build may have a NULL outPath.
+    @outPaths = grep {defined} @outPaths;
+
     my @steps = $c->model('DB::BuildSteps')->search(
         { path => { -in => [@outPaths] } },
         { select => ["drvpath"]
@@ -494,7 +504,7 @@ sub restartBuilds {
     $builds = $builds->search({ finished => 1 });

     foreach my $build ($builds->search({}, { columns => ["drvpath"] })) {
-        next if !isValidPath($build->drvpath);
+        next if !$MACHINE_LOCAL_STORE->isValidPath($build->drvpath);
         registerRoot $build->drvpath;
     }

@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
 use File::Path;
 use Hydra::Helper::Exec;
 use Hydra::Helper::Nix;
-use Nix::Store;

 sub supportedInputTypes {
     my ($self, $inputTypes) = @_;
@@ -38,9 +37,9 @@ sub fetchInput {
     (my $cachedInput) = $self->{db}->resultset('CachedBazaarInputs')->search(
         {uri => $uri, revision => $revision});

-    addTempRoot($cachedInput->storepath) if defined $cachedInput;
+    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
     } else {
@@ -58,7 +57,7 @@ sub fetchInput {
     ($sha256, $storePath) = split ' ', $stdout;

     # FIXME: time window between nix-prefetch-bzr and addTempRoot.
-    addTempRoot($storePath);
+    $MACHINE_LOCAL_STORE->addTempRoot($storePath);

     $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedBazaarInputs')->create(

@@ -9,11 +9,24 @@ use Hydra::Helper::CatalystUtils;
 sub stepFinished {
     my ($self, $step, $logPath) = @_;

-    my $doCompress = $self->{config}->{'compress_build_logs'} // "1";
+    my $doCompress = $self->{config}->{'compress_build_logs'} // '1';
+    my $silent = $self->{config}->{'compress_build_logs_silent'} // '0';
+    my $compression = $self->{config}->{'compress_build_logs_compression'} // 'bzip2';

-    if ($doCompress eq "1" && -e $logPath) {
-        print STDERR "compressing ‘$logPath’...\n";
-        system("bzip2", "--force", $logPath);
+    if (not -e $logPath or $doCompress ne "1") {
+        return;
+    }
+
+    if ($silent ne '1') {
+        print STDERR "compressing '$logPath' with $compression...\n";
+    }
+
+    if ($compression eq 'bzip2') {
+        system('bzip2', '--force', $logPath);
+    } elsif ($compression eq 'zstd') {
+        system('zstd', '--rm', '--quiet', '-T0', $logPath);
+    } else {
+        print STDERR "unknown compression type '$compression'\n";
     }
 }

@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
 use File::Path;
 use Hydra::Helper::Exec;
 use Hydra::Helper::Nix;
-use Nix::Store;

 sub supportedInputTypes {
     my ($self, $inputTypes) = @_;
@@ -58,7 +57,7 @@ sub fetchInput {
         {uri => $uri, revision => $revision},
         {rows => 1});

-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
         $revision = $cachedInput->revision;
@@ -75,8 +74,8 @@ sub fetchInput {
     die "darcs changes --count failed" if $? != 0;

     system "rm", "-rf", "$tmpDir/export/_darcs";
-    $storePath = addToStore("$tmpDir/export", 1, "sha256");
-    $sha256 = queryPathHash($storePath);
+    $storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/export", 1, "sha256");
+    $sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath);
     $sha256 =~ s/sha256://;

     $self->{db}->txn_do(sub {

@@ -186,9 +186,9 @@ sub fetchInput {
         {uri => $uri, branch => $branch, revision => $revision, isdeepclone => defined($deepClone) ? 1 : 0},
         {rows => 1});

-    addTempRoot($cachedInput->storepath) if defined $cachedInput;
+    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
         $revision = $cachedInput->revision;
@@ -217,7 +217,7 @@ sub fetchInput {
     ($sha256, $storePath) = split ' ', grab(cmd => ["nix-prefetch-git", $clonePath, $revision], chomp => 1);

     # FIXME: time window between nix-prefetch-git and addTempRoot.
-    addTempRoot($storePath);
+    $MACHINE_LOCAL_STORE->addTempRoot($storePath);

     $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedGitInputs')->update_or_create(

@@ -88,10 +88,6 @@ sub buildQueued {
     common(@_, [], 0);
 }

-sub buildStarted {
-    common(@_, [], 1);
-}
-
 sub buildFinished {
     common(@_, 2);
 }

@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
 use File::Path;
 use Hydra::Helper::Nix;
 use Hydra::Helper::Exec;
-use Nix::Store;
 use Fcntl qw(:flock);

 sub supportedInputTypes {
@@ -68,9 +67,9 @@ sub fetchInput {
     (my $cachedInput) = $self->{db}->resultset('CachedHgInputs')->search(
         {uri => $uri, branch => $branch, revision => $revision});

-    addTempRoot($cachedInput->storepath) if defined $cachedInput;
+    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
     } else {
@@ -85,7 +84,7 @@ sub fetchInput {
     ($sha256, $storePath) = split ' ', $stdout;

     # FIXME: time window between nix-prefetch-hg and addTempRoot.
-    addTempRoot($storePath);
+    $MACHINE_LOCAL_STORE->addTempRoot($storePath);

     $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedHgInputs')->update_or_create(

@@ -5,7 +5,6 @@ use warnings;
 use parent 'Hydra::Plugin';
 use POSIX qw(strftime);
 use Hydra::Helper::Nix;
-use Nix::Store;

 sub supportedInputTypes {
     my ($self, $inputTypes) = @_;
@@ -30,7 +29,7 @@ sub fetchInput {
         {srcpath => $uri, lastseen => {">", $timestamp - $timeout}},
         {rows => 1, order_by => "lastseen DESC"});

-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
         $timestamp = $cachedInput->timestamp;
@@ -46,7 +45,7 @@ sub fetchInput {
     }
     chomp $storePath;

-    $sha256 = (queryPathInfo($storePath, 0))[1] or die;
+    $sha256 = ($MACHINE_LOCAL_STORE->queryPathInfo($storePath, 0))[1] or die;

     ($cachedInput) = $self->{db}->resultset('CachedPathInputs')->search(
         {srcpath => $uri, sha256hash => $sha256});

@@ -14,6 +14,7 @@ use Nix::Config;
 use Nix::Store;
 use Hydra::Model::DB;
 use Hydra::Helper::CatalystUtils;
+use Hydra::Helper::Nix;

 sub isEnabled {
     my ($self) = @_;
@@ -92,7 +93,7 @@ sub buildFinished {
     my $hash = substr basename($path), 0, 32;
     my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($path, 0);
     my $system;
-    if (defined $deriver and isValidPath($deriver)) {
+    if (defined $deriver and $MACHINE_LOCAL_STORE->isValidPath($deriver)) {
         $system = derivationFromPath($deriver)->{platform};
     }
     foreach my $reference (@{$refs}) {

@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
 use Hydra::Helper::Exec;
 use Hydra::Helper::Nix;
 use IPC::Run;
-use Nix::Store;

 sub supportedInputTypes {
     my ($self, $inputTypes) = @_;
@@ -45,9 +44,9 @@ sub fetchInput {
     (my $cachedInput) = $self->{db}->resultset('CachedSubversionInputs')->search(
         {uri => $uri, revision => $revision});

-    addTempRoot($cachedInput->storepath) if defined $cachedInput;
+    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
     } else {
@@ -62,16 +61,16 @@ sub fetchInput {
     die "error checking out Subversion repo at `$uri':\n$stderr" if $res;

     if ($type eq "svn-checkout") {
-        $storePath = addToStore($wcPath, 1, "sha256");
+        $storePath = $MACHINE_LOCAL_STORE->addToStore($wcPath, 1, "sha256");
     } else {
         # Hm, if the Nix Perl bindings supported filters in
         # addToStore(), then we wouldn't need to make a copy here.
         my $tmpDir = File::Temp->newdir("hydra-svn-export.XXXXXX", CLEANUP => 1, TMPDIR => 1) or die;
         (system "svn", "export", $wcPath, "$tmpDir/source", "--quiet") == 0 or die "svn export failed";
-        $storePath = addToStore("$tmpDir/source", 1, "sha256");
+        $storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/source", 1, "sha256");
     }

-    $sha256 = queryPathHash($storePath); $sha256 =~ s/sha256://;
+    $sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath); $sha256 =~ s/sha256://;

     $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedSubversionInputs')->update_or_create(

@@ -8,6 +8,7 @@ use MIME::Base64;
 use Nix::Manifest;
 use Nix::Store;
 use Nix::Utils;
+use Hydra::Helper::Nix;
 use base qw/Catalyst::View/;

 sub process {
@@ -17,7 +18,7 @@ sub process {

     $c->response->content_type('text/x-nix-narinfo'); # !!! check MIME type

-    my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath, 1);
+    my ($deriver, $narHash, $time, $narSize, $refs) = $MACHINE_LOCAL_STORE->queryPathInfo($storePath, 1);

     my $info;
     $info .= "StorePath: $storePath\n";
@@ -28,8 +29,8 @@ sub process {
     $info .= "References: " . join(" ", map { basename $_ } @{$refs}) . "\n";
     if (defined $deriver) {
         $info .= "Deriver: " . basename $deriver . "\n";
-        if (isValidPath($deriver)) {
-            my $drv = derivationFromPath($deriver);
+        if ($MACHINE_LOCAL_STORE->isValidPath($deriver)) {
+            my $drv = $MACHINE_LOCAL_STORE->derivationFromPath($deriver);
             $info .= "System: $drv->{platform}\n";
         }
     }

@@ -16,7 +16,10 @@ sub process {

     my $tail = int($c->stash->{tail} // "0");

-    if ($logPath =~ /\.bz2$/) {
+    if ($logPath =~ /\.zst$/) {
+        my $doTail = $tail ? "| tail -n '$tail'" : "";
+        open($fh, "-|", "zstd -dc < '$logPath' $doTail") or die;
+    } elsif ($logPath =~ /\.bz2$/) {
         my $doTail = $tail ? "| tail -n '$tail'" : "";
         open($fh, "-|", "bzip2 -dc < '$logPath' $doTail") or die;
     } else {

@@ -1,22 +0,0 @@
-PERL_MODULES = \
-  $(wildcard *.pm) \
-  $(wildcard Hydra/*.pm) \
-  $(wildcard Hydra/Helper/*.pm) \
-  $(wildcard Hydra/Model/*.pm) \
-  $(wildcard Hydra/View/*.pm) \
-  $(wildcard Hydra/Schema/*.pm) \
-  $(wildcard Hydra/Schema/Result/*.pm) \
-  $(wildcard Hydra/Schema/ResultSet/*.pm) \
-  $(wildcard Hydra/Controller/*.pm) \
-  $(wildcard Hydra/Base/*.pm) \
-  $(wildcard Hydra/Base/Controller/*.pm) \
-  $(wildcard Hydra/Script/*.pm) \
-  $(wildcard Hydra/Component/*.pm) \
-  $(wildcard Hydra/Event/*.pm) \
-  $(wildcard Hydra/Plugin/*.pm)
-
-EXTRA_DIST = \
-  $(PERL_MODULES)
-
-hydradir = $(libexecdir)/hydra/lib
-nobase_hydra_DATA = $(PERL_MODULES)
src/libhydra/meson.build (new file)
@@ -0,0 +1,5 @@
+libhydra_inc = include_directories('.')
+
+libhydra_dep = declare_dependency(
+  include_directories: [libhydra_inc],
+)
src/meson.build (new file)
@@ -0,0 +1,86 @@
+# Native code
+subdir('libhydra')
+subdir('hydra-build-step')
+subdir('hydra-evaluator')
+subdir('hydra-queue-runner')
+
+hydra_libexecdir = get_option('libexecdir') / 'hydra'
+
+# Data and interpreted
+foreach dir : ['lib', 'root']
+  install_subdir(dir,
+    install_dir: hydra_libexecdir,
+  )
+endforeach
+subdir('sql')
+subdir('ttf')
+
+# Static files for website
+
+hydra_libexecdir_static = hydra_libexecdir / 'root' / 'static'
+
+## Bootstrap
+
+bootstrap_name = 'bootstrap-4.3.1-dist'
+bootstrap = custom_target(
+  'extract-bootstrap',
+  input: 'root' / (bootstrap_name + '.zip'),
+  output: bootstrap_name,
+  command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'],
+)
+custom_target(
+  'name-bootstrap',
+  input: bootstrap,
+  output: 'bootstrap',
+  command: ['cp', '-r', '@INPUT@' , '@OUTPUT@'],
+  install: true,
+  install_dir: hydra_libexecdir_static,
+)
+
+## Flot
+
+custom_target(
+  'extract-flot',
+  input: 'root' / 'flot-0.8.3.zip',
+  output: 'flot',
+  command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'],
+  install: true,
+  install_dir: hydra_libexecdir_static / 'js',
+)
+
+## Fontawesome
+
+fontawesome_name = 'fontawesome-free-5.10.2-web'
+fontawesome = custom_target(
+  'extract-fontawesome',
+  input: 'root' / (fontawesome_name + '.zip'),
+  output: fontawesome_name,
+  command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'],
+)
+custom_target(
+  'name-fontawesome-css',
+  input: fontawesome,
+  output: 'css',
+  command: ['cp', '-r', '@INPUT@/css', '@OUTPUT@'],
+  install: true,
+  install_dir: hydra_libexecdir_static / 'fontawesome',
+)
+custom_target(
+  'name-fontawesome-webfonts',
+  input: fontawesome,
+  output: 'webfonts',
+  command: ['cp', '-r', '@INPUT@/webfonts', '@OUTPUT@'],
+  install: true,
+  install_dir: hydra_libexecdir_static / 'fontawesome',
+)
+
+# Scripts
+
+install_subdir('script',
+  install_dir: get_option('bindir'),
+  exclude_files: [
+    'hydra-dev-server',
+  ],
+  install_mode: 'rwxr-xr-x',
+  strip_directory: true,
+)
@@ -1,39 +0,0 @@
-TEMPLATES = $(wildcard *.tt)
-STATIC = \
-  $(wildcard static/images/*) \
-  $(wildcard static/css/*) \
-  static/js/bootbox.min.js \
-  static/js/popper.min.js \
-  static/js/common.js \
-  static/js/jquery/jquery-3.4.1.min.js \
-  static/js/jquery/jquery-ui-1.10.4.min.js
-
-FLOT = flot-0.8.3.zip
-BOOTSTRAP = bootstrap-4.3.1-dist.zip
-FONTAWESOME = fontawesome-free-5.10.2-web.zip
-
-ZIPS = $(FLOT) $(BOOTSTRAP) $(FONTAWESOME)
-
-EXTRA_DIST = $(TEMPLATES) $(STATIC) $(ZIPS)
-
-hydradir = $(libexecdir)/hydra/root
-nobase_hydra_DATA = $(EXTRA_DIST)
-
-all:
-	mkdir -p $(srcdir)/static/js
-	unzip -u -d $(srcdir)/static $(BOOTSTRAP)
-	rm -rf $(srcdir)/static/bootstrap
-	mv $(srcdir)/static/$(basename $(BOOTSTRAP)) $(srcdir)/static/bootstrap
-	unzip -u -d $(srcdir)/static/js $(FLOT)
-	unzip -u -d $(srcdir)/static $(FONTAWESOME)
-	rm -rf $(srcdir)/static/fontawesome
-	mv $(srcdir)/static/$(basename $(FONTAWESOME)) $(srcdir)/static/fontawesome
-
-install-data-local: $(ZIPS)
-	mkdir -p $(hydradir)/static/js
-	cp -prvd $(srcdir)/static/js/* $(hydradir)/static/js
-	mkdir -p $(hydradir)/static/bootstrap
-	cp -prvd $(srcdir)/static/bootstrap/* $(hydradir)/static/bootstrap
-	mkdir -p $(hydradir)/static/fontawesome/{css,webfonts}
-	cp -prvd $(srcdir)/static/fontawesome/css/* $(hydradir)/static/fontawesome/css
-	cp -prvd $(srcdir)/static/fontawesome/webfonts/* $(hydradir)/static/fontawesome/webfonts
@@ -33,7 +33,7 @@
 <div id="hydra-signin" class="modal hide fade" tabindex="-1" role="dialog" aria-hidden="true">
   <div class="modal-dialog" role="document">
     <div class="modal-content">
-      <form>
+      <form id="signin-form">
         <div class="modal-body">
           <div class="form-group">
             <label for="username" class="col-form-label">User name</label>
@@ -45,7 +45,7 @@
           </div>
         </div>
         <div class="modal-footer">
-          <button id="do-signin" type="button" class="btn btn-primary">Sign in</button>
+          <button type="submit" class="btn btn-primary">Sign in</button>
           <button type="button" class="btn btn-secondary" data-dismiss="modal">Cancel</button>
         </div>
       </form>
@@ -57,10 +57,11 @@

       function finishSignOut() { }

-      $("#do-signin").click(function() {
+      $("#signin-form").submit(function(e) {
+        e.preventDefault();
         requestJSON({
           url: "[% c.uri_for('/login') %]",
-          data: $(this).parents("form").serialize(),
+          data: $(this).serialize(),
           type: 'POST',
           success: function(data) {
             window.location.reload();

@@ -374,7 +374,7 @@ BLOCK renderInputDiff; %]
       [% ELSIF bi1.uri == bi2.uri && bi1.revision != bi2.revision %]
         [% IF bi1.type == "git" %]
           <tr><td>
-            <b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 8) _ ' to ' _ bi2.revision.substr(0, 8)) %]</tt>
+            <b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 12) _ ' to ' _ bi2.revision.substr(0, 12)) %]</tt>
           </td></tr>
         [% ELSE %]
           <tr><td>

@@ -205,6 +205,7 @@
         if (!c) return;
         requestJSON({
           url: "[% HTML.escape(c.uri_for('/api/push', { jobsets = project.name _ ':' _ jobset.name, force = "1" })) %]",
+          type: 'POST',
           success: function(data) {
             bootbox.alert("The jobset has been scheduled for evaluation.");
           }

@@ -93,7 +93,7 @@
 <footer class="navbar">
   <hr />
   <small>
-    <em><a href="http://nixos.org/hydra" target="_blank" class="squiggle">Hydra</a> [% HTML.escape(version) %] (using [% HTML.escape(nixVersion) %]).</em>
+    <em><a href="http://nixos.org/hydra" target="_blank" class="squiggle">Hydra</a> [% HTML.escape(version) %] (using [% HTML.escape(nixVersion) %] and [% HTML.escape(nixEvalJobsVersion) %]).</em>
     [% IF c.user_exists %]
       You are signed in as <tt>[% HTML.escape(c.user.username) %]</tt>
       [%- IF c.user.type == 'google' %] via Google[% END %].

@@ -7,7 +7,7 @@ main() {

 set -e

-tmpDir=${TMPDIR:-/tmp}/build-[% build.id +%]
+tmpDir=$(realpath "${TMPDIR:-/tmp}")/build-[% build.id +%]
 declare -a args extraArgs

@@ -91,6 +91,7 @@
       [% INCLUDE roleoption mutable=mutable role="restart-jobs" %]
       [% INCLUDE roleoption mutable=mutable role="bump-to-front" %]
       [% INCLUDE roleoption mutable=mutable role="cancel-build" %]
+      [% INCLUDE roleoption mutable=mutable role="eval-jobset" %]
     </p>
   </div>
 </div>

@@ -1,19 +0,0 @@
-EXTRA_DIST = \
-  $(distributable_scripts)
-
-distributable_scripts = \
-  hydra-backfill-ids \
-  hydra-init \
-  hydra-eval-jobset \
-  hydra-server \
-  hydra-update-gc-roots \
-  hydra-s3-backup-collect-garbage \
-  hydra-create-user \
-  hydra-notify \
-  hydra-send-stats \
-  nix-prefetch-git \
-  nix-prefetch-bzr \
-  nix-prefetch-hg
-
-bin_SCRIPTS = \
-  $(distributable_scripts)
@@ -17,6 +17,7 @@ use Hydra::Helper::Nix;
 use Hydra::Model::DB;
 use Hydra::Plugin;
 use Hydra::Schema;
+use IPC::Run;
 use JSON::MaybeXS;
 use Net::Statsd;
 use Nix::Store;
@@ -85,14 +86,14 @@ sub attrsToSQL {
 # Fetch a store path from 'eval_substituter' if not already present.
 sub getPath {
     my ($path) = @_;
-    return 1 if isValidPath($path);
+    return 1 if $MACHINE_LOCAL_STORE->isValidPath($path);

     my $substituter = $config->{eval_substituter};

     system("nix", "--experimental-features", "nix-command", "copy", "--from", $substituter, "--", $path)
         if defined $substituter;

-    return isValidPath($path);
+    return $MACHINE_LOCAL_STORE->isValidPath($path);
 }

@@ -143,7 +144,7 @@ sub fetchInputBuild {
         , version => $version
         , outputName => $mainOutput->name
         };
-    if (isValidPath($prevBuild->drvpath)) {
+    if ($MACHINE_LOCAL_STORE->isValidPath($prevBuild->drvpath)) {
         $result->{drvPath} = $prevBuild->drvpath;
     }

@@ -233,7 +234,7 @@ sub fetchInputEval {
         my $out = $build->buildoutputs->find({ name => "out" });
         next unless defined $out;
         # FIXME: Should we fail if the path is not valid?
-        next unless isValidPath($out->path);
+        next unless $MACHINE_LOCAL_STORE->isValidPath($out->path);
         $jobs->{$build->get_column('job')} = $out->path;
     }

@@ -357,22 +358,32 @@ sub evalJobs {
     my @cmd;

     if (defined $flakeRef) {
-        @cmd = ("hydra-eval-jobs",
-            "--flake", $flakeRef,
-            "--gc-roots-dir", getGCRootsDir,
-            "--max-jobs", 1);
+        my $nix_expr =
+            "let " .
+            "flake = builtins.getFlake (toString \"$flakeRef\"); " .
+            "in " .
+            "flake.hydraJobs " .
+            "or flake.checks " .
+            "or (throw \"flake '$flakeRef' does not provide any Hydra jobs or checks\")";
+
+        @cmd = ("nix-eval-jobs", "--expr", $nix_expr);
     } else {
         my $nixExprInput = $inputInfo->{$nixExprInputName}->[0]
             or die "cannot find the input containing the job expression\n";

-        @cmd = ("hydra-eval-jobs",
+        @cmd = ("nix-eval-jobs",
             "<" . $nixExprInputName . "/" . $nixExprPath . ">",
-            "--gc-roots-dir", getGCRootsDir,
-            "--max-jobs", 1,
             inputsToArgs($inputInfo));
     }

-    push @cmd, "--no-allow-import-from-derivation" if $config->{allow_import_from_derivation} // "true" ne "true";
+    push @cmd, ("--gc-roots-dir", getGCRootsDir);
+    push @cmd, ("--max-jobs", 1);
+    push @cmd, "--meta";
+    push @cmd, "--constituents";
+    push @cmd, "--force-recurse";
+    push @cmd, ("--option", "allow-import-from-derivation", "false") if $config->{allow_import_from_derivation} // "true" ne "true";
+    push @cmd, ("--workers", $config->{evaluator_workers} // 1);
+    push @cmd, ("--max-memory-size", $config->{evaluator_max_memory_size} // 4096);

     if (defined $ENV{'HYDRA_DEBUG'}) {
         sub escape {
@@ -384,14 +395,40 @@ sub evalJobs {
         print STDERR "evaluator: @escaped\n";
     }

-    (my $res, my $jobsJSON, my $stderr) = captureStdoutStderr(21600, @cmd);
-    die "hydra-eval-jobs returned " . ($res & 127 ? "signal $res" : "exit code " . ($res >> 8))
-        . ":\n" . ($stderr ? decode("utf-8", $stderr) : "(no output)\n")
-        if $res;
+    my $evalProc = IPC::Run::start \@cmd,
+        '>', IPC::Run::new_chunker, \my $out,
+        '2>', \my $err;

-    print STDERR "$stderr";
+    return sub {
+        while (1) {
+            $evalProc->pump;
+            if (!defined $out && !defined $err) {
+                $evalProc->finish;
+                if ($?) {
+                    die "nix-eval-jobs returned " . ($? & 127 ? "signal $?" : "exit code " . ($? >> 8)) . "\n";
+                }
+                return;
+            }

-    return decode_json($jobsJSON);
+            if (defined $err) {
+                print STDERR "$err";
+                undef $err;
+            }
+
+            if (defined $out && $out ne '') {
+                my $job;
+                try {
+                    $job = decode_json($out);
+                } catch {
+                    warn "nix-eval-jobs sent invalid JSON.\n parse error: $_\n invalid json: $out\n";
+                };
+                undef $out;
+                if (defined $job) {
+                    return $job;
+                }
+            }
+        }
+    };
 }

@@ -420,7 +457,7 @@ sub checkBuild {
|
||||
my $firstOutputName = $outputNames[0];
|
||||
my $firstOutputPath = $buildInfo->{outputs}->{$firstOutputName};
|
||||
|
||||
my $jobName = $buildInfo->{jobName} or die;
|
||||
my $jobName = $buildInfo->{attr} or die;
|
||||
my $drvPath = $buildInfo->{drvPath} or die;
|
||||
|
||||
my $build;
|
||||
@@ -474,9 +511,30 @@ sub checkBuild {
|
||||
|
||||
my $time = time();
|
||||
|
||||
sub null {
|
||||
my ($s) = @_;
|
||||
return $s eq "" ? undef : $s;
|
||||
sub getMeta {
|
||||
my ($s, $def) = @_;
|
||||
return ($s || "") eq "" ? $def : $s;
|
||||
}
|
||||
|
||||
sub getMetaStrings {
|
||||
my ($v, $k, $acc) = @_;
|
||||
my $t = ref $v;
|
||||
|
||||
if ($t eq 'HASH') {
|
||||
push @$acc, $v->{$k} if exists $v->{$k};
|
||||
} elsif ($t eq 'ARRAY') {
|
||||
getMetaStrings($_, $k, $acc) foreach @$v;
|
||||
} elsif (defined $v) {
|
||||
push @$acc, $v;
|
||||
}
|
||||
}
|
||||
|
||||
sub getMetaConcatStrings {
|
||||
my ($v, $k) = @_;
|
||||
|
||||
my @strings;
|
||||
getMetaStrings($v, $k, \@strings);
|
||||
return join(", ", @strings) || undef;
|
||||
}
|
||||
|
||||
# Add the build to the database.
|
||||
@@ -484,19 +542,19 @@ sub checkBuild {
|
||||
{ timestamp => $time
|
||||
, jobset_id => $jobset->id
|
||||
, job => $jobName
|
||||
, description => null($buildInfo->{description})
|
||||
, license => null($buildInfo->{license})
|
||||
, homepage => null($buildInfo->{homepage})
|
||||
, maintainers => null($buildInfo->{maintainers})
|
||||
, maxsilent => $buildInfo->{maxSilent}
|
||||
, timeout => $buildInfo->{timeout}
|
||||
, nixname => $buildInfo->{nixName}
|
||||
, description => getMeta($buildInfo->{meta}->{description}, undef)
|
||||
, license => getMetaConcatStrings($buildInfo->{meta}->{license}, "shortName")
|
||||
, homepage => getMeta($buildInfo->{meta}->{homepage}, undef)
|
||||
, maintainers => getMetaConcatStrings($buildInfo->{meta}->{maintainers}, "email")
|
||||
, maxsilent => getMeta($buildInfo->{meta}->{maxSilent}, 7200)
|
||||
, timeout => getMeta($buildInfo->{meta}->{timeout}, 36000)
|
||||
, nixname => $buildInfo->{name}
|
||||
, drvpath => $drvPath
|
||||
, system => $buildInfo->{system}
|
||||
, priority => $buildInfo->{schedulingPriority}
|
||||
, priority => getMeta($buildInfo->{meta}->{schedulingPriority}, 100)
|
||||
, finished => 0
|
||||
, iscurrent => 1
|
||||
, ischannel => $buildInfo->{isChannel}
|
||||
, ischannel => getMeta($buildInfo->{meta}->{isChannel}, 0)
|
||||
});
|
||||
|
||||
$build->buildoutputs->create({ name => $_, path => $buildInfo->{outputs}->{$_} })
|
||||
@@ -665,7 +723,7 @@ sub checkJobsetWrapped {
|
||||
return;
|
||||
}
|
||||
|
||||
# Hash the arguments to hydra-eval-jobs and check the
|
||||
# Hash the arguments to nix-eval-jobs and check the
|
||||
# JobsetInputHashes to see if the previous evaluation had the same
|
||||
# inputs. If so, bail out.
|
||||
my @args = ($jobset->nixexprinput // "", $jobset->nixexprpath // "", inputsToArgs($inputInfo));
|
||||
@@ -687,19 +745,12 @@ sub checkJobsetWrapped {
|
||||
|
||||
# Evaluate the job expression.
|
||||
my $evalStart = clock_gettime(CLOCK_MONOTONIC);
|
||||
my $jobs = evalJobs($project->name . ":" . $jobset->name, $inputInfo, $jobset->nixexprinput, $jobset->nixexprpath, $flakeRef);
|
||||
my $evalStop = clock_gettime(CLOCK_MONOTONIC);
|
||||
|
||||
if ($jobsetsJobset) {
|
||||
my @keys = keys %$jobs;
|
||||
die "The .jobsets jobset must only have a single job named 'jobsets'"
|
||||
unless (scalar @keys) == 1 && $keys[0] eq "jobsets";
|
||||
}
|
||||
Net::Statsd::timing("hydra.evaluator.eval_time", int(($evalStop - $evalStart) * 1000));
|
||||
my $evalStop;
|
||||
my $jobsIter = evalJobs($project->name . ":" . $jobset->name, $inputInfo, $jobset->nixexprinput, $jobset->nixexprpath, $flakeRef);
|
||||
|
||||
if ($dryRun) {
|
||||
foreach my $name (keys %{$jobs}) {
|
||||
my $job = $jobs->{$name};
|
||||
while (defined(my $job = $jobsIter->())) {
|
||||
my $name = $job->{attr};
|
||||
if (defined $job->{drvPath}) {
|
||||
print STDERR "good job $name: $job->{drvPath}\n";
|
||||
} else {
|
||||
@@ -709,36 +760,20 @@ sub checkJobsetWrapped {
|
||||
return;
|
||||
}
|
||||
|
||||
die "Jobset contains a job with an empty name. Make sure the jobset evaluates to an attrset of jobs.\n"
|
||||
if defined $jobs->{""};
|
||||
|
||||
$jobs->{$_}->{jobName} = $_ for keys %{$jobs};
|
||||
|
||||
my $jobOutPathMap = {};
|
||||
my $jobsetChanged = 0;
|
||||
my $dbStart = clock_gettime(CLOCK_MONOTONIC);
|
||||
|
||||
|
||||
# Store the error messages for jobs that failed to evaluate.
|
||||
my $evaluationErrorTime = time;
|
||||
my $evaluationErrorMsg = "";
|
||||
foreach my $job (values %{$jobs}) {
|
||||
next unless defined $job->{error};
|
||||
$evaluationErrorMsg .=
|
||||
($job->{jobName} ne "" ? "in job ‘$job->{jobName}’" : "at top-level") .
|
||||
":\n" . $job->{error} . "\n\n";
|
||||
}
|
||||
setJobsetError($jobset, $evaluationErrorMsg, $evaluationErrorTime);
|
||||
|
||||
my $evaluationErrorRecord = $db->resultset('EvaluationErrors')->create(
|
||||
{ errormsg => $evaluationErrorMsg
|
||||
, errortime => $evaluationErrorTime
|
||||
}
|
||||
);
|
||||
|
||||
my $jobOutPathMap = {};
|
||||
my $jobsetChanged = 0;
|
||||
my %buildMap;
|
||||
$db->txn_do(sub {
|
||||
|
||||
$db->txn_do(sub {
|
||||
my $prevEval = getPrevJobsetEval($db, $jobset, 1);
|
||||
|
||||
# Clear the "current" flag on all builds. Since we're in a
|
||||
@@ -751,7 +786,7 @@ sub checkJobsetWrapped {
|
||||
, evaluationerror => $evaluationErrorRecord
|
||||
, timestamp => time
|
||||
, checkouttime => abs(int($checkoutStop - $checkoutStart))
|
||||
, evaltime => abs(int($evalStop - $evalStart))
|
||||
, evaltime => 0
|
||||
, hasnewbuilds => 0
|
||||
, nrbuilds => 0
|
||||
, flake => $flakeRef
|
||||
@@ -759,11 +794,24 @@ sub checkJobsetWrapped {
|
||||
, nixexprpath => $jobset->nixexprpath
|
||||
});
|
||||
|
||||
# Schedule each successfully evaluated job.
|
||||
foreach my $job (permute(values %{$jobs})) {
|
||||
next if defined $job->{error};
|
||||
#print STDERR "considering job " . $project->name, ":", $jobset->name, ":", $job->{jobName} . "\n";
|
||||
checkBuild($db, $jobset, $ev, $inputInfo, $job, \%buildMap, $prevEval, $jobOutPathMap, $plugins);
|
||||
my @jobsWithConstituents;
|
||||
|
||||
while (defined(my $job = $jobsIter->())) {
|
||||
if ($jobsetsJobset) {
|
||||
die "The .jobsets jobset must only have a single job named 'jobsets'"
|
||||
unless $job->{attr} eq "jobsets";
|
||||
}
|
||||
|
||||
$evaluationErrorMsg .=
|
||||
($job->{attr} ne "" ? "in job ‘$job->{attr}’" : "at top-level") .
|
||||
":\n" . $job->{error} . "\n\n" if defined $job->{error};
|
||||
|
||||
checkBuild($db, $jobset, $ev, $inputInfo, $job, \%buildMap, $prevEval, $jobOutPathMap, $plugins)
|
||||
unless defined $job->{error};
|
||||
|
||||
if (defined $job->{constituents}) {
|
||||
push @jobsWithConstituents, $job;
|
||||
}
|
||||
}
|
||||
|
||||
# Have any builds been added or removed since last time?
|
||||
@@ -801,21 +849,20 @@ sub checkJobsetWrapped {
|
||||
$drvPathToId{$x->{drvPath}} = $x;
|
||||
}
|
||||
|
||||
foreach my $job (values %{$jobs}) {
|
||||
next unless $job->{constituents};
|
||||
|
||||
foreach my $job (values @jobsWithConstituents) {
|
||||
next unless defined $job->{constituents};
|
||||
if (defined $job->{error}) {
|
||||
die "aggregate job ‘$job->{jobName}’ failed with the error: $job->{error}\n";
|
||||
die "aggregate job ‘$job->{attr}’ failed with the error: $job->{error}\n";
|
||||
}
|
||||
|
||||
my $x = $drvPathToId{$job->{drvPath}} or
|
||||
die "aggregate job ‘$job->{jobName}’ has no corresponding build record.\n";
|
||||
die "aggregate job ‘$job->{attr}’ has no corresponding build record.\n";
|
||||
foreach my $drvPath (@{$job->{constituents}}) {
|
||||
my $constituent = $drvPathToId{$drvPath};
|
||||
if (defined $constituent) {
|
||||
$db->resultset('AggregateConstituents')->update_or_create({aggregate => $x->{id}, constituent => $constituent->{id}});
|
||||
} else {
|
||||
warn "aggregate job ‘$job->{jobName}’ has a constituent ‘$drvPath’ that doesn't correspond to a Hydra build\n";
|
||||
warn "aggregate job ‘$job->{attr}’ has a constituent ‘$drvPath’ that doesn't correspond to a Hydra build\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -857,11 +904,15 @@ sub checkJobsetWrapped {
|
||||
$jobset->update({ enabled => 0 }) if $jobset->enabled == 2;
|
||||
|
||||
$jobset->update({ lastcheckedtime => time, forceeval => undef });
|
||||
|
||||
$evaluationErrorRecord->update({ errormsg => $evaluationErrorMsg });
|
||||
setJobsetError($jobset, $evaluationErrorMsg, $evaluationErrorTime);
|
||||
|
||||
$evalStop = clock_gettime(CLOCK_MONOTONIC);
|
||||
$ev->update({ evaltime => abs(int($evalStop - $evalStart)) });
|
||||
});
|
||||
|
||||
my $dbStop = clock_gettime(CLOCK_MONOTONIC);
|
||||
|
||||
Net::Statsd::timing("hydra.evaluator.db_time", int(($dbStop - $dbStart) * 1000));
|
||||
Net::Statsd::timing("hydra.evaluator.eval_time", int(($evalStop - $evalStart) * 1000));
|
||||
Net::Statsd::increment("hydra.evaluator.evals");
|
||||
Net::Statsd::increment("hydra.evaluator.cached_evals") unless $jobsetChanged;
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ use warnings;
|
||||
use File::Path;
|
||||
use File::stat;
|
||||
use File::Basename;
|
||||
use Nix::Store;
|
||||
use Hydra::Config;
|
||||
use Hydra::Schema;
|
||||
use Hydra::Helper::Nix;
|
||||
@@ -47,7 +46,7 @@ sub keepBuild {
|
||||
$build->finished && ($build->buildstatus == 0 || $build->buildstatus == 6))
|
||||
{
|
||||
foreach my $path (split / /, $build->get_column('outpaths')) {
|
||||
if (isValidPath($path)) {
|
||||
if ($MACHINE_LOCAL_STORE->isValidPath($path)) {
|
||||
addRoot $path;
|
||||
} else {
|
||||
print STDERR " warning: output ", $path, " has disappeared\n" if $build->finished;
|
||||
@@ -55,7 +54,7 @@ sub keepBuild {
|
||||
}
|
||||
}
|
||||
if (!$build->finished || ($keepFailedDrvs && $build->buildstatus != 0)) {
|
||||
if (isValidPath($build->drvpath)) {
|
||||
if ($MACHINE_LOCAL_STORE->isValidPath($build->drvpath)) {
|
||||
addRoot $build->drvpath;
|
||||
} else {
|
||||
print STDERR " warning: derivation ", $build->drvpath, " has disappeared\n";
|
||||
|
||||
@@ -78,7 +78,7 @@ fi
|
||||
|
||||
init_remote(){
|
||||
local url=$1;
|
||||
git init;
|
||||
git init --initial-branch=trunk;
|
||||
git remote add origin $url;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,9 +0,0 @@
-sqldir = $(libexecdir)/hydra/sql
-nobase_dist_sql_DATA = \
-	hydra.sql \
-	test.sql \
-	upgrade-*.sql \
-	update-dbix.pl
-
-update-dbix: hydra.sql
-	./update-dbix-harness.sh

src/sql/meson.build (new file, 90 lines)
@@ -0,0 +1,90 @@
+sql_files = files(
+  'hydra.sql',
+  'test.sql',
+  'update-dbix.pl',
+  'upgrade-2.sql',
+  'upgrade-3.sql',
+  'upgrade-4.sql',
+  'upgrade-5.sql',
+  'upgrade-6.sql',
+  'upgrade-7.sql',
+  'upgrade-8.sql',
+  'upgrade-9.sql',
+  'upgrade-10.sql',
+  'upgrade-11.sql',
+  'upgrade-12.sql',
+  'upgrade-13.sql',
+  'upgrade-14.sql',
+  'upgrade-15.sql',
+  'upgrade-16.sql',
+  'upgrade-17.sql',
+  'upgrade-18.sql',
+  'upgrade-19.sql',
+  'upgrade-20.sql',
+  'upgrade-21.sql',
+  'upgrade-22.sql',
+  'upgrade-23.sql',
+  'upgrade-24.sql',
+  'upgrade-25.sql',
+  'upgrade-26.sql',
+  'upgrade-27.sql',
+  'upgrade-28.sql',
+  'upgrade-29.sql',
+  'upgrade-30.sql',
+  'upgrade-31.sql',
+  'upgrade-32.sql',
+  'upgrade-33.sql',
+  'upgrade-34.sql',
+  'upgrade-35.sql',
+  'upgrade-36.sql',
+  'upgrade-37.sql',
+  'upgrade-38.sql',
+  'upgrade-39.sql',
+  'upgrade-40.sql',
+  'upgrade-41.sql',
+  'upgrade-42.sql',
+  'upgrade-43.sql',
+  'upgrade-44.sql',
+  'upgrade-45.sql',
+  'upgrade-46.sql',
+  'upgrade-47.sql',
+  'upgrade-48.sql',
+  'upgrade-49.sql',
+  'upgrade-50.sql',
+  'upgrade-51.sql',
+  'upgrade-52.sql',
+  'upgrade-53.sql',
+  'upgrade-54.sql',
+  'upgrade-55.sql',
+  'upgrade-56.sql',
+  'upgrade-57.sql',
+  'upgrade-58.sql',
+  'upgrade-59.sql',
+  'upgrade-60.sql',
+  'upgrade-61.sql',
+  'upgrade-62.sql',
+  'upgrade-63.sql',
+  'upgrade-64.sql',
+  'upgrade-65.sql',
+  'upgrade-66.sql',
+  'upgrade-67.sql',
+  'upgrade-68.sql',
+  'upgrade-69.sql',
+  'upgrade-70.sql',
+  'upgrade-71.sql',
+  'upgrade-72.sql',
+  'upgrade-73.sql',
+  'upgrade-74.sql',
+  'upgrade-75.sql',
+  'upgrade-76.sql',
+  'upgrade-77.sql',
+  'upgrade-78.sql',
+  'upgrade-79.sql',
+  'upgrade-80.sql',
+  'upgrade-81.sql',
+  'upgrade-82.sql',
+  'upgrade-83.sql',
+  'upgrade-84.sql',
+)
+
+install_data(sql_files, install_dir: hydra_libexecdir / 'sql')

@@ -1,4 +0,0 @@
-EXTRA_DIST = COPYING.LIB StayPuft.ttf
-
-ttfdir = $(libexecdir)/hydra/ttf
-nobase_ttf_DATA = $(EXTRA_DIST)

src/ttf/meson.build (new file, 5 lines)
@@ -0,0 +1,5 @@
+data_files = files(
+  'StayPuft.ttf',
+  'COPYING.LIB',
+)
+install_data(data_files, install_dir: hydra_libexecdir / 'ttf')

@@ -57,6 +57,7 @@ subtest "getLDAPConfig" => sub {
         "hydra_cancel-build" => [ "cancel-build" ],
         "hydra_create-projects" => [ "create-projects" ],
         "hydra_restart-jobs" => [ "restart-jobs" ],
+        "hydra_eval-jobset" => [ "eval-jobset" ],
     }
 },
 "The empty file and set env var make legacy mode active."

@@ -177,6 +178,7 @@ subtest "get_legacy_ldap_config" => sub {
         "hydra_cancel-build" => [ "cancel-build" ],
         "hydra_create-projects" => [ "create-projects" ],
         "hydra_restart-jobs" => [ "restart-jobs" ],
+        "hydra_eval-jobset" => [ "eval-jobset" ],
     }
 },
 "Legacy, default role maps are applied."

@@ -22,9 +22,24 @@ sub is_json {
 }

 my $ctx = test_context();

 Catalyst::Test->import('Hydra');

+# Create a user to log in to
+my $user = $ctx->db->resultset('Users')->create({ username => 'alice', emailaddress => 'alice@example.com', password => '!' });
+$user->setPassword('foobar');
+$user->userroles->update_or_create({ role => 'admin' });
+
+# Login and save cookie for future requests
+my $req = request(POST '/login',
+    Referer => 'http://localhost/',
+    Content => {
+        username => 'alice',
+        password => 'foobar'
+    }
+);
+is($req->code, 302, "The login redirects");
+my $cookie = $req->header("set-cookie");
+
 my $finishedBuilds = $ctx->makeAndEvaluateJobset(
     expression => "one-job.nix",
     build => 1

@@ -109,7 +124,10 @@ subtest "/api/push" => sub {
     my $jobsetName = $jobset->name;
     is($jobset->forceeval, undef, "The existing jobset is not set to be forced to eval");

-    my $response = request(GET "/api/push?jobsets=$projectName:$jobsetName&force=1");
+    my $response = request(POST "/api/push?jobsets=$projectName:$jobsetName&force=1",
+        Cookie => $cookie,
+        Referer => 'http://localhost/',
+    );
     ok($response->is_success, "The API endpoint for triggering jobsets returns 200.");

     my $data = is_json($response);

@@ -128,7 +146,10 @@ subtest "/api/push" => sub {

     print STDERR $repo;

-    my $response = request(GET "/api/push?repos=$repo&force=1");
+    my $response = request(POST "/api/push?repos=$repo&force=1",
+        Cookie => $cookie,
+        Referer => 'http://localhost/',
+    );
     ok($response->is_success, "The API endpoint for triggering jobsets returns 200.");

     my $data = is_json($response);

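Seen from a client's perspective, the change above means /api/push is now a POST that requires an authenticated session and a Referer header, where a bare GET used to suffice. A hedged sketch of what a caller outside the test harness would do, using LWP::UserAgent; the host, jobset name, and credentials below are placeholders, not values from the diff:

```perl
use strict;
use warnings;
use LWP::UserAgent;
use HTTP::Cookies;

my $base = 'http://localhost:3000';   # hypothetical Hydra instance

# The cookie jar keeps the session cookie from /login for the later POST.
my $ua = LWP::UserAgent->new(cookie_jar => HTTP::Cookies->new);

my $login = $ua->post("$base/login",
    Referer => "$base/",
    Content => { username => 'alice', password => 'foobar' });
die "login failed: " . $login->status_line
    unless $login->is_success || $login->is_redirect;

# Trigger an evaluation; the Referer header is part of the CSRF check.
my $push = $ua->post("$base/api/push?jobsets=sample:default&force=1",
    Referer => "$base/");
print $push->status_line, "\n";
```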
@@ -54,13 +54,14 @@ subtest "/job/PROJECT/JOBSET/JOB/shield" => sub {

 subtest "/job/PROJECT/JOBSET/JOB/prometheus" => sub {
     my $response = request(GET '/job/' . $project->name . '/' . $jobset->name . '/' . $build->job . '/prometheus');
-    ok($response->is_success, "The page showing the job's prometheus data returns 200.");
-    my $metrics = $response->content;
-
-    ok($metrics =~ m/hydra_job_failed\{.*\} 0/);
-    ok($metrics =~ m/hydra_job_completion_time\{.*\} [\d]+/);
-    ok($metrics =~ m/hydra_build_closure_size\{.*\} 96/);
-    ok($metrics =~ m/hydra_build_output_size\{.*\} 96/);
+    ok($response->is_success, "The page showing the job's prometheus data returns 200.");
+
+    my $metrics = $response->content;
+    like($metrics, qr/hydra_job_failed\{.*\} 0/);
+    like($metrics, qr/hydra_job_completion_time\{.*\} [\d]+/);
+    like($metrics, qr/hydra_build_closure_size\{.*\} 96/);
+    like($metrics, qr/hydra_build_output_size\{.*\} 96/);
 };

 done_testing;

@@ -186,7 +186,7 @@ subtest 'Update jobset "job" to have an invalid input type' => sub {
         })
     );
     ok(!$jobsetupdate->is_success);
-    ok($jobsetupdate->content =~ m/Invalid input type.*valid types:/);
+    like($jobsetupdate->content, qr/Invalid input type.*valid types:/);
 };

@@ -24,7 +24,7 @@ my $cookie = $login->header("set-cookie");
 my $my_jobs = request(GET '/dashboard/alice/my-jobs-tab', Accept => 'application/json', Cookie => $cookie);
 ok($my_jobs->is_success);
 my $content = $my_jobs->content();
-ok($content =~ /empty_dir/);
+like($content, qr/empty_dir/);
 ok(!($content =~ /fails/));
 ok(!($content =~ /succeed_with_failed/));
 done_testing;

@@ -24,6 +24,7 @@ $ldap->add_group("hydra_create-projects", $users->{"many_roles"}->{"username"});
 $ldap->add_group("hydra_restart-jobs", $users->{"many_roles"}->{"username"});
 $ldap->add_group("hydra_bump-to-front", $users->{"many_roles"}->{"username"});
 $ldap->add_group("hydra_cancel-build", $users->{"many_roles"}->{"username"});
+$ldap->add_group("hydra_eval-jobset", $users->{"many_roles"}->{"username"});

 my $hydra_ldap_config = "${\$ldap->tmpdir()}/hydra_ldap_config.yaml";
 LDAPContext::write_file($hydra_ldap_config, <<YAML);

@@ -68,7 +69,7 @@ subtest "Valid login attempts" => sub {
     unrelated => [],
     admin => ["admin"],
     not_admin => [],
-    many_roles => [ "create-projects", "restart-jobs", "bump-to-front", "cancel-build" ],
+    many_roles => [ "create-projects", "restart-jobs", "bump-to-front", "cancel-build", "eval-jobset" ],
 );
 for my $username (keys %users_to_roles) {
     my $user = $users->{$username};

@@ -24,6 +24,7 @@ $ldap->add_group("hydra_create-projects", $users->{"many_roles"}->{"username"});
 $ldap->add_group("hydra_restart-jobs", $users->{"many_roles"}->{"username"});
 $ldap->add_group("hydra_bump-to-front", $users->{"many_roles"}->{"username"});
 $ldap->add_group("hydra_cancel-build", $users->{"many_roles"}->{"username"});
+$ldap->add_group("hydra_eval-jobset", $users->{"many_roles"}->{"username"});


 my $ctx = test_context(

@@ -76,10 +77,12 @@ my $ctx = test_context(
 hydra_cancel-build = cancel-build
 hydra_bump-to-front = bump-to-front
 hydra_restart-jobs = restart-jobs
+hydra_eval-jobset = eval-jobset

 hydra_one_group_many_roles = create-projects
 hydra_one_group_many_roles = cancel-build
 hydra_one_group_many_roles = bump-to-front
+hydra_one_group_many-roles = eval-jobset
 </role_mapping>
 </ldap>
 CFG

@@ -92,7 +95,7 @@ subtest "Valid login attempts" => sub {
     unrelated => [],
     admin => ["admin"],
     not_admin => [],
-    many_roles => [ "create-projects", "restart-jobs", "bump-to-front", "cancel-build" ],
+    many_roles => [ "create-projects", "restart-jobs", "bump-to-front", "cancel-build", "eval-jobset" ],
     many_roles_one_group => [ "create-projects", "bump-to-front", "cancel-build" ],
 );
 for my $username (keys %users_to_roles) {

@@ -1,39 +0,0 @@
-TESTS_ENVIRONMENT = \
-	BZR_HOME="$(abs_builddir)/data" \
-	HYDRA_DBI="dbi:Pg:dbname=hydra-test-suite;port=6433" \
-	HYDRA_DATA="$(abs_builddir)/data" \
-	HYDRA_HOME="$(top_srcdir)/src" \
-	HYDRA_CONFIG= \
-	NIX_REMOTE= \
-	NIX_REMOTE_SYSTEMS= \
-	NIX_CONF_DIR="$(abs_builddir)/nix/etc/nix" \
-	NIX_STATE_DIR="$(abs_builddir)/nix/var/nix" \
-	NIX_STORE_DIR="$(abs_builddir)/nix/store" \
-	NIX_LOG_DIR="$(abs_builddir)/nix/var/log/nix" \
-	PGHOST=/tmp \
-	PERL5LIB="$(srcdir):$(abs_top_srcdir)/src/lib:$$PERL5LIB" \
-	PYTHONPATH= \
-	PATH=$(abs_top_srcdir)/src/hydra-evaluator:$(abs_top_srcdir)/src/script:$(abs_top_srcdir)/src/hydra-eval-jobs:$(abs_top_srcdir)/src/hydra-queue-runner:$$PATH \
-	perl -w
-
-EXTRA_DIST = \
-	$(wildcard *.pm) \
-	$(wildcard jobs/*.nix) \
-	$(wildcard jobs/*.sh) \
-	$(TESTS)
-
-TESTS = \
-	perlcritic.pl \
-	test.pl
-
-check_SCRIPTS = repos
-
-repos: dirs
-
-dirs:
-	mkdir -p data
-	touch data/hydra.conf
-	mkdir -p nix
-	mkdir -p nix/etc/nix
-	mkdir -p nix/store
-	mkdir -p nix/var

@@ -115,7 +115,7 @@ subtest "evaluation" => sub {
     my $build = decode_json(request_json({ uri => "/build/" . $evals->[0]->{builds}->[0] })->content());
     is($build->{job}, "job", "The build's job name is job");
     is($build->{finished}, 0, "The build isn't finished yet");
-    ok($build->{buildoutputs}->{out}->{path} =~ /\/nix\/store\/[a-zA-Z0-9]{32}-job$/, "The build's outpath is in the Nix store and named 'job'");
+    like($build->{buildoutputs}->{out}->{path}, qr/\/nix\/store\/[a-zA-Z0-9]{32}-job$/, "The build's outpath is in the Nix store and named 'job'");

 subtest "search" => sub {
     my $search_project = decode_json(request_json({ uri => "/search/?query=sample" })->content());

@@ -27,13 +27,13 @@ my $project = $db->resultset('Projects')->create({name => "tests", displayname =
 my $jobset = createBaseJobset("content-addressed", "content-addressed.nix", $ctx{jobsdir});

 ok(evalSucceeds($jobset), "Evaluating jobs/content-addressed.nix should exit with return code 0");
-is(nrQueuedBuildsForJobset($jobset), 5, "Evaluating jobs/content-addressed.nix should result in 4 builds");
+is(nrQueuedBuildsForJobset($jobset), 6, "Evaluating jobs/content-addressed.nix should result in 6 builds");

 for my $build (queuedBuildsForJobset($jobset)) {
     ok(runBuild($build), "Build '".$build->job."' from jobs/content-addressed.nix should exit with code 0");
     my $newbuild = $db->resultset('Builds')->find($build->id);
     is($newbuild->finished, 1, "Build '".$build->job."' from jobs/content-addressed.nix should be finished.");
-    my $expected = $build->job eq "fails" ? 1 : $build->job =~ /with_failed/ ? 6 : 0;
+    my $expected = $build->job eq "fails" ? 1 : $build->job =~ /with_failed/ ? 6 : $build->job =~ /FailingCA/ ? 2 : 0;
     is($newbuild->buildstatus, $expected, "Build '".$build->job."' from jobs/content-addressed.nix should have buildstatus $expected.");

     my $response = request("/build/".$build->id);

@@ -55,6 +55,8 @@ for my $build (queuedBuildsForJobset($jobset)) {

 }

+# XXX: deststoredir is undefined: Use of uninitialized value $ctx{"deststoredir"} in concatenation (.) or string at t/content-addressed/basic.t line 58.
+# XXX: This test seems to not do what it seems to be doing. See documentation: https://metacpan.org/pod/Test2::V0#isnt($got,-$do_not_want,-$name)
 isnt(<$ctx{deststoredir}/realisations/*>, "", "The destination store should have the realisations of the built derivations registered");

 done_testing;

@@ -18,14 +18,14 @@ isnt($res, 0, "hydra-eval-jobset exits non-zero");
 ok(utf8::decode($stderr), "Stderr output is UTF8-clean");
 like(
     $stderr,
-    qr/aggregate job ‘mixed_aggregate’ failed with the error: constituentA: does not exist/,
+    qr/aggregate job ‘mixed_aggregate’ failed with the error: "constituentA": does not exist/,
     "The stderr record includes a relevant error message"
 );

-$jobset->discard_changes; # refresh from DB
+$jobset->discard_changes({ '+columns' => {'errormsg' => 'errormsg'} }); # refresh from DB
 like(
     $jobset->errormsg,
-    qr/aggregate job ‘mixed_aggregate’ failed with the error: constituentA: does not exist/,
+    qr/aggregate job ‘mixed_aggregate’ failed with the error: "constituentA": does not exist/,
     "The jobset records a relevant error message"
 );

@@ -5,13 +5,58 @@ use Test2::V0;

 my $ctx = test_context();

-my $builds = $ctx->makeAndEvaluateJobset(
-    expression => 'constituents.nix',
+my $expression = 'constituents.nix';
+my $jobsetCtx = $ctx->makeJobset(
+    expression => $expression,
+);
+my $builds = $ctx->evaluateJobset(
+    jobset => $jobsetCtx->{"jobset"},
+    expression => $expression,
+    build => 0,
 );

 my $constituentA = $builds->{"constituentA"};
 my $directAggregate = $builds->{"direct_aggregate"};
 my $indirectAggregate = $builds->{"indirect_aggregate"};
 my $mixedAggregate = $builds->{"mixed_aggregate"};

+# Ensure that we get exactly the aggregates we expect
+my %expected_constituents = (
+    'direct_aggregate' => {
+        'constituentA' => 1,
+    },
+    'indirect_aggregate' => {
+        'constituentA' => 1,
+    },
+    'mixed_aggregate' => {
+        # Note that `constituentA_alias` becomes `constituentA`, because
+        # the shorter name is preferred
+        'constituentA' => 1,
+        'constituentB' => 1,
+    },
+);
+
+my $rs = $ctx->db->resultset('AggregateConstituents')->search(
+    {},
+    {
+        join => [ 'aggregate', 'constituent' ], # Use correct relationship names
+        columns => [],
+        '+select' => [ 'aggregate.job', 'constituent.job' ],
+        '+as' => [ 'aggregate_job', 'constituent_job' ],
+    }
+);
+
+my %actual_constituents;
+while (my $row = $rs->next) {
+    my $aggregate_job = $row->get_column('aggregate_job');
+    my $constituent_job = $row->get_column('constituent_job');
+    $actual_constituents{$aggregate_job} //= {};
+    $actual_constituents{$aggregate_job}{$constituent_job} = 1;
+}
+
+is(\%actual_constituents, \%expected_constituents, "Exact aggregate constituents as expected");
+
 # Check that deletion also doesn't work accordingly

 is(system('nix-store', '--delete', $constituentA->drvpath), 256, "Deleting a constituent derivation fails");
 is(system('nix-store', '--delete', $directAggregate->drvpath), 256, "Deleting the direct aggregate derivation fails");

t/evaluator/evaluate-flake.t (new file, 67 lines)
@@ -0,0 +1,67 @@
+use feature 'unicode_strings';
+use strict;
+use warnings;
+use Setup;
+use Test2::V0;
+use File::Copy qw(cp);
+
+my $ctx = test_context(
+    nix_config => qq|
+experimental-features = nix-command flakes
+|,
+    hydra_config => q|
+<runcommand>
+  evaluator_pure_eval = false
+</runcommand>
+|
+);
+
+sub checkFlake {
+    my ($flake) = @_;
+
+    cp($ctx->jobsdir . "/basic.nix", $ctx->jobsdir . "/" . $flake);
+    cp($ctx->jobsdir . "/config.nix", $ctx->jobsdir . "/" . $flake);
+    cp($ctx->jobsdir . "/empty-dir-builder.sh", $ctx->jobsdir . "/" . $flake);
+    cp($ctx->jobsdir . "/fail.sh", $ctx->jobsdir . "/" . $flake);
+    cp($ctx->jobsdir . "/succeed-with-failed.sh", $ctx->jobsdir . "/" . $flake);
+
+    chmod 0755, $ctx->jobsdir . "/" . $flake . "/empty-dir-builder.sh";
+    chmod 0755, $ctx->jobsdir . "/" . $flake . "/fail.sh";
+    chmod 0755, $ctx->jobsdir . "/" . $flake . "/succeed-with-failed.sh";
+
+    my $builds = $ctx->makeAndEvaluateJobset(
+        flake => 'path:' . $ctx->jobsdir . "/" . $flake,
+        build => 1
+    );
+
+    subtest "Build: succeed_with_failed" => sub {
+        my $build = $builds->{"succeed_with_failed"};
+
+        is($build->finished, 1, "Build should be finished.");
+        is($build->buildstatus, 6, "succeeded-but-failed should have buildstatus 6.");
+    };
+
+    subtest "Build: empty_dir" => sub {
+        my $build = $builds->{"empty_dir"};
+
+        is($build->finished, 1, "Build should be finished.");
+        is($build->buildstatus, 0, "Should have succeeded.");
+    };
+
+    subtest "Build: fails" => sub {
+        my $build = $builds->{"fails"};
+
+        is($build->finished, 1, "Build should be finished.");
+        is($build->buildstatus, 1, "Should have failed.");
+    };
+}
+
+subtest "Flake using `checks`" => sub {
+    checkFlake 'flake-checks'
+};
+
+subtest "Flake using `hydraJobs`" => sub {
+    checkFlake 'flake-hydraJobs'
+};
+
+done_testing;

t/evaluator/evaluate-meta.t (new file, 22 lines)
@@ -0,0 +1,22 @@
+use feature 'unicode_strings';
+use strict;
+use warnings;
+use Setup;
+use Test2::V0;
+
+my $ctx = test_context();
+
+my $builds = $ctx->makeAndEvaluateJobset(
+    expression => "meta.nix",
+    build => 1
+);
+
+my $build = $builds->{"full-of-meta"};
+
+is($build->finished, 1, "Build should be finished.");
+is($build->description, "This is the description of the job.", "Wrong description extracted from the build.");
+is($build->license, "MIT, BSD", "Wrong licenses extracted from the build.");
+is($build->homepage, "https://example.com/", "Wrong homepage extracted from the build.");
+is($build->maintainers, 'alice@example.com, bob@not.found', "Wrong maintainers extracted from the build.");
+
+done_testing;

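The strings this test expects ("MIT, BSD", the comma-joined maintainer emails) are exactly what the getMeta helpers added to hydra-eval-jobset further up produce: license entries may be bare strings or sets with a shortName, maintainers may be strings or sets with an email, and the helpers flatten both into one string. A standalone demonstration, with the two subs copied in shape from that hunk and fed the structures that t/jobs/meta.nix (shown further down) evaluates to:

```perl
use strict;
use warnings;

# Walk a meta value that may be a string, a hash, or a (nested) list,
# collecting the strings; hashes contribute their $k entry if present.
sub getMetaStrings {
    my ($v, $k, $acc) = @_;
    my $t = ref $v;
    if ($t eq 'HASH') {
        push @$acc, $v->{$k} if exists $v->{$k};
    } elsif ($t eq 'ARRAY') {
        getMetaStrings($_, $k, $acc) foreach @$v;
    } elsif (defined $v) {
        push @$acc, $v;
    }
}

sub getMetaConcatStrings {
    my ($v, $k) = @_;
    my @strings;
    getMetaStrings($v, $k, \@strings);
    return join(", ", @strings) || undef;
}

# meta.license from t/jobs/meta.nix, as nix-eval-jobs would serialize it:
my $license = [ { shortName => 'MIT' }, 'BSD' ];
print getMetaConcatStrings($license, 'shortName'), "\n";   # MIT, BSD

my $maintainers = [ 'alice@example.com', { email => 'bob@not.found' } ];
print getMetaConcatStrings($maintainers, 'email'), "\n";   # alice@example.com, bob@not.found
```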
@@ -31,6 +31,10 @@ if ($sd_res != 0) {
     skip_all("`systemd-run` returned non-zero when executing `true` (expected 0)");
 }

+# XXX(Mindavi): We should think about how to fix this.
+# Note that it was always skipped on ofborg/h.n.o (nixos hydra) since systemd-run is not present in the ambient environment there.
+skip_all("Always fails, an error about 'oom' being a string is logged and the process never OOMs. Needs a way to use more memory.");
+
 my $ctx = test_context();

 # Contain the memory usage to 25 MegaBytes using `systemd-run`

@@ -5,6 +5,8 @@ rec {
     builder = ./empty-dir-builder.sh;
   };

+  constituentA_alias = constituentA;
+
   constituentB = mkDerivation {
     name = "empty-dir-B";
     builder = ./empty-dir-builder.sh;

@@ -32,7 +34,7 @@ rec {
     name = "mixed_aggregate";
     _hydraAggregate = true;
     constituents = [
-      "constituentA"
+      "constituentA_alias"
       constituentB
     ];
     builder = ./empty-dir-builder.sh;

@@ -25,6 +25,13 @@ rec {
     FOO = empty_dir;
   };

+  caDependingOnFailingCA =
+    cfg.mkContentAddressedDerivation {
+      name = "ca-depending-on-failing-ca";
+      builder = ./dir-with-file-builder.sh;
+      FOO = fails;
+    };
+
   nonCaDependingOnCA =
     cfg.mkDerivation {
       name = "non-ca-depending-on-ca";

t/jobs/flake-checks/flake.nix (new file, 6 lines)
@@ -0,0 +1,6 @@
+{
+  outputs = { ... }: {
+    checks =
+      import ./basic.nix;
+  };
+}

t/jobs/flake-hydraJobs/flake.nix (new file, 6 lines)
@@ -0,0 +1,6 @@
+{
+  outputs = { ... }: {
+    hydraJobs =
+      import ./basic.nix;
+  };
+}

t/jobs/meta.nix (new file, 17 lines)
@@ -0,0 +1,17 @@
+with import ./config.nix;
+{
+  full-of-meta =
+    mkDerivation {
+      name = "full-of-meta";
+      builder = ./empty-dir-builder.sh;
+
+      meta = {
+        description = "This is the description of the job.";
+        license = [ { shortName = "MIT"; } "BSD" ];
+        homepage = "https://example.com/";
+        maintainers = [ "alice@example.com" { email = "bob@not.found"; } ];
+
+        outPath = "${placeholder "out"}";
+      };
+    };
+}

@@ -4,6 +4,8 @@ use warnings;
 package HydraTestContext;
 use File::Path qw(make_path);
 use File::Basename;
+use File::Copy::Recursive qw(rcopy);
+use File::Which qw(which);
 use Cwd qw(abs_path getcwd);
 use CliRunners;
 use Hydra::Helper::Exec;

@@ -77,6 +79,13 @@ sub new {
     );
     $ENV{'HYDRA_DBI'} = $pgsql->dsn;

+    my $jobsdir = "$dir/jobs";
+    rcopy(abs_path(dirname(__FILE__) . "/../jobs"), $jobsdir);
+
+    my $coreutils_path = dirname(which 'install');
+    replace_variable_in_file($jobsdir . "/config.nix", '@testPath@', $coreutils_path);
+    replace_variable_in_file($jobsdir . "/declarative/project.json", '@jobsPath@', $jobsdir);
+
     my $self = bless {
         _db => undef,
         db_handle => $pgsql,

@@ -84,7 +93,7 @@ sub new {
         nix_state_dir => $nix_state_dir,
         nix_log_dir => $nix_log_dir,
         testdir => abs_path(dirname(__FILE__) . "/.."),
-        jobsdir => abs_path(dirname(__FILE__) . "/../jobs"),
+        jobsdir => $jobsdir,
         deststoredir => $deststoredir,
     }, $class;

@@ -92,7 +101,7 @@ sub new {
         $opts{'before_init'}->($self);
     }

-    expectOkay(5, ("hydra-init"));
+    expectOkay(30, ("hydra-init"));

     return $self;
 }

@@ -156,20 +165,46 @@ sub nix_state_dir {
 sub makeAndEvaluateJobset {
     my ($self, %opts) = @_;

-    my $expression = $opts{'expression'} || die "Mandatory 'expression' option not passed to makeAndEvaluateJobset.\n";
-    my $jobsdir = $opts{'jobsdir'} // $self->jobsdir;
-    my $should_build = $opts{'build'} // 0;
+    my $expression = $opts{'expression'};
+    my $flake = $opts{'flake'};
+    if (not $expression and not $flake) {
+        die "One of 'expression' or 'flake' must be passed to makeEvaluateJobset.\n";
+    }

-    my $jobsetCtx = $self->makeJobset(
-        expression => $expression,
+    my $jobsdir = $opts{'jobsdir'} // $self->jobsdir;
+
+    my %args = (
         jobsdir => $jobsdir,
     );
-    my $jobset = $jobsetCtx->{"jobset"};
+    if ($expression) {
+        $args{expression} = $expression;
+    }
+    if ($flake) {
+        $args{flake} = $flake;
+    }
+    my $jobsetCtx = $self->makeJobset(%args);
+
+    return $self->evaluateJobset(
+        jobset => $jobsetCtx->{"jobset"},
+        expression => $expression,
+        flake => $flake,
+        build => $opts{"build"} // 0,
+    )
+}
+
+sub evaluateJobset {
+    my ($self, %opts) = @_;
+
+    my $jobset = $opts{'jobset'};
+
+    my $expression = $opts{'expression'} // $opts{'flake'};

     evalSucceeds($jobset) or die "Evaluating jobs/$expression should exit with return code 0.\n";

     my $builds = {};

+    my $should_build = $opts{'build'};
+
     for my $build ($jobset->builds) {
         if ($should_build) {
             runBuild($build) or die "Build '".$build->job."' from jobs/$expression should exit with return code 0.\n";

@@ -186,7 +221,7 @@
 #
 # In return, you get a hash of the user, project, and jobset records.
 #
-# This always uses an `expression` from the `jobsdir` directory.
+# This always uses an `expression` or `flake` from the `jobsdir` directory.
 #
 # Hash Parameters:
 #

@@ -195,7 +230,12 @@
 sub makeJobset {
     my ($self, %opts) = @_;

-    my $expression = $opts{'expression'} || die "Mandatory 'expression' option not passed to makeJobset.\n";
+    my $expression = $opts{'expression'};
+    my $flake = $opts{'flake'};
+    if (not $expression and not $flake) {
+        die "One of 'expression' or 'flake' must be passed to makeJobset.\n";
+    }

     my $jobsdir = $opts{'jobsdir'} // $self->jobsdir;

     # Create a new user for this test

@@ -213,12 +253,20 @@
     });

     # Create a new jobset for this test and set up the inputs
-    my $jobset = $project->jobsets->create({
+    my %args = (
         name => rand_chars(),
-        nixexprinput => "jobs",
-        nixexprpath => $expression,
         emailoverride => ""
-    });
+    );
+    if ($expression) {
+        $args{type} = 0;
+        $args{nixexprinput} = "jobs";
+        $args{nixexprpath} = $expression;
+    }
+    if ($flake) {
+        $args{type} = 1;
+        $args{flake} = $flake;
+    }
+    my $jobset = $project->jobsets->create(\%args);
     my $jobsetinput = $jobset->jobsetinputs->create({name => "jobs", type => "path"});
     $jobsetinput->jobsetinputalts->create({altnr => 0, value => $jobsdir});

@@ -243,6 +291,18 @@
     close $fh;
 }

+sub replace_variable_in_file {
+    my ($fn, $var, $val) = @_;
+
+    open (my $input, '<', "$fn.in") or die $!;
+    open (my $output, '>', $fn) or die $!;
+
+    while (my $line = <$input>) {
+        $line =~ s/$var/$val/g;
+        print $output $line;
+    }
+}
+
 sub rand_chars {
     return sprintf("t%08X", rand(0xFFFFFFFF));
 }

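makeAndEvaluateJobset is now a thin wrapper over two separable steps, makeJobset and evaluateJobset, and tests can also call them individually. A sketch of both call styles under the new API; it assumes the usual t/lib Setup environment, and the flake path is a placeholder:

```perl
use strict;
use warnings;
use Setup;        # from t/lib, provides test_context()
use Test2::V0;

my $ctx = test_context();

# Step-by-step style: create the jobset first (so the record can be
# inspected or tweaked), then evaluate it without building, as the
# reworked constituents test above does.
my $jobsetCtx = $ctx->makeJobset(expression => 'constituents.nix');
my $builds = $ctx->evaluateJobset(
    jobset     => $jobsetCtx->{"jobset"},
    expression => 'constituents.nix',
    build      => 0,
);

# One-shot style, now also accepting a flake ref instead of an expression;
# makeJobset then sets type = 1 and stores the flake instead of
# nixexprinput/nixexprpath. (The path below is a placeholder.)
my $flakeBuilds = $ctx->makeAndEvaluateJobset(
    flake => 'path:/some/flake/dir',
    build => 1,
);

done_testing;
```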
@@ -70,7 +70,7 @@ sub add_user {
     my $email = $opts{'email'} // "$name\@example";
     my $password = $opts{'password'} // rand_chars();

-    my ($res, $stdout, $stderr) = captureStdoutStderr(1, ("slappasswd", "-s", $password));
+    my ($res, $stdout, $stderr) = captureStdoutStderr(5, ("slappasswd", "-s", $password));
     if ($res) {
         die "Failed to execute slappasswd ($res): $stderr, $stdout";
     }

@@ -178,7 +178,7 @@ sub start {
 sub validateConfig {
     my ($self) = @_;

-    expectOkay(1, ("slaptest", "-u", "-F", $self->{"_slapd_dir"}));
+    expectOkay(5, ("slaptest", "-u", "-F", $self->{"_slapd_dir"}));
 }

 sub _spawn {

@@ -218,7 +218,7 @@ sub load_ldif {

     my $path = "${\$self->{'_tmpdir'}}/load.ldif";
     write_file($path, $content);
-    expectOkay(1, ("slapadd", "-F", $self->{"_slapd_dir"}, "-b", $suffix, "-l", $path));
+    expectOkay(5, ("slapadd", "-F", $self->{"_slapd_dir"}, "-b", $suffix, "-l", $path));
     $self->validateConfig();
 }

t/meson.build (new file, 44 lines)
@@ -0,0 +1,44 @@
+fs = import('fs')
+
+test('perlcritic',
+  perl,
+  args: ['-w', files('perlcritic.pl')],
+  workdir: meson.project_source_root(),
+  timeout: -1,
+)
+
+testenv = environment(
+  {
+    'BZR_HOME': meson.current_build_dir() / 'data',
+    'HYDRA_DBI': 'dbi:Pg:dbname=hydra-test-suite;port=6433',
+    'HYDRA_DATA': meson.current_build_dir() / 'data',
+    'HYDRA_HOME': meson.project_source_root() / 'src',
+    'PGHOST': '/tmp',
+    'PYTHONPATH': '',
+
+    # libpqxx seems to randomly crash with certain values of MALLOC_PERTURB_,
+    # set by default by Meson's test(). Very promising, high quality software.
+    'MALLOC_PERTURB_': '0',
+  },
+)
+testenv.prepend('PERL5LIB',
+  meson.current_source_dir(),
+  meson.project_source_root() / 'src/lib',
+  separator: ':'
+)
+testenv.prepend('PATH',
+  fs.parent(find_program('nix').full_path()),
+  fs.parent(hydra_build_step.full_path()),
+  fs.parent(hydra_evaluator.full_path()),
+  fs.parent(hydra_queue_runner.full_path()),
+  meson.project_source_root() / 'src/script',
+  separator: ':'
+)
+
+test('testsuite',
+  perl,
+  args: ['-I', meson.current_source_dir() / 'lib', '-w', files('test.pl')],
+  env: testenv,
+  workdir: meson.current_source_dir(),
+  timeout: -1,
+)

Some files were not shown because too many files have changed in this diff.