Compare commits
176 Commits
nix-2.17...eval-jobse
Commits in this range (by SHA1):

f730433789, 916531dc9c, b1a0501520, b94a7b6d5c, 9ee3c6aea2, 4bb2f08be1, c23973785f, b2b2d6e26c,
99ca560d58, 2c886f51d3, 7de7122479, 54002f0fcf, a6b14369ee, 578a3d2292, ada51d70fc, bc19e7cd65,
d7986226f0, b3e0d9a8b7, 5728011da1, 559376e907, 998df1657e, f99cdaf5fe, 3bf00e31c0, e149da7b9b,
e81c36ac92, 743795b2b0, 50378aef22, 92155f9a07, 29ce5c603c, 4bd687e3e6, 1b8154e67f, b72528be50,
8b48579593, ef7bf1e67b, ab1f64aa4d, 3f913a771d, 71986632ce, 1665aed5e3, b676b08fac, d614163e9c,
99afff03b0, 8f56209bd6, 806c375c33, 669617ab54, c45c06509a, 9db5d0a88d, 973cb644d3, e499509595,
ceff5c5cfe, 878c0f240e, c1bd50a80d, 14aabc1cc9, 7b826ec5ad, 838648c0ce, 6ac4292912, b503280256,
b4c91b5a6a, 8477009310, c62eaf248f, 13b5f007ef, 7f5889559e, 5ee0e443e4, 323b556dc8, 458b9e4242,
fcde5908d8, 083ef46c12, 7a53b866f6, 8a02bb7c36, c64eed7d07, aed130cd17, 7a6c401d42, b5ed0787f7,
c5f37eca91, 73b6c1fb11, 4bbc7b8f75, d6d6d1b649, 1bd195a513, 1471aacadc, 62ddeb0ff0, a876e46894,
6df06b089e, cc50fdff6f, b1fa6b3aac, f6a2b7562a, 07cb5d1b7c, 449eb2d873, 2bdbf51d7d, 9e7ac58042,
d45e14fd43, 9a86da0e7b, d02e20a4c1, 70e5469303, 2e6ee28f9b, 20b0ad3ba2, 7386caaecf, 84c46b6b68,
f1d9230f25, f5c0efb11e, 4e8fbaa3d6, 34c51fcea9, 4ac31c89df, db7aa01b8d, 89cfe26533, 588a0c5269,
02e453fc8c, 75f26f1fc4, 3c89067f52, abd858d3dc, 163dbf7f54, 642156372f, 7517c134c5, 6e67884ff1,
a6b6c5a539, ebfefb9161, 8783dd53f6, f3a760ad9c, 8c10331ee8, 20f5a2120c, b56d2383c1, 2bd67562b5,
69a5b00e60, 1d80b72ffb, 105fd18fee, f6f817926a, d0d3b0a298, 3f932a6731, aaa0e128c1, 4515b5aa17,
411e4d0c24, 831021808c, 2ee0068fdc, 31ea6458ca, 20c8263e3c, 91bbd5366f, a45a27851b, 6a54ab24e2,
58707438ba, 86cd5e9076, 11f8030b0f, 3df8feb3a2, 069b7775c5, e3443cd22a, 8046ec2668, 9ba4417940,
a5d44b60ea, 363604846a, 162b538912, 104baef503, 3c5636162a, 874fcae1e8, 4dc8fe0b08, 67eeabd518,
622c25e3c4, f216bce0e6, 4d1c850512, c922e73c11, e172461e55, 0917145622, 2bda7ca642, 831a2d9bd5,
5db374cb50, e9da80fff6, b7c864c515, 3526d61ff2, 143c31734f, a81c6a3a80, 750978a192, 6e571e26ff,
92b627ac1b, b430d41afd, fd0ae78eba, a778a89f04, 365776f5d7, 9f1b911625, 2f494b7834, 5db8642224
.gitignore (vendored): 1 line changed

````diff
@@ -38,6 +38,7 @@ t/jobs/declarative/project.json
 hydra-config.h
 hydra-config.h.in
 result
+result-*
 outputs
 config
 stamp-h1
````
Makefile.am: 12 lines changed

````diff
@@ -1,8 +1,12 @@
-SUBDIRS = src t doc
+SUBDIRS = src doc
+if CAN_DO_CHECK
+SUBDIRS += t
+endif
+
 BOOTCLEAN_SUBDIRS = $(SUBDIRS)
 DIST_SUBDIRS = $(SUBDIRS)
-EXTRA_DIST = hydra-module.nix
+EXTRA_DIST = nixos-modules/hydra.nix
 
-install-data-local: hydra-module.nix
+install-data-local: nixos-modules/hydra.nix
 	$(INSTALL) -d $(DESTDIR)$(datadir)/nix
-	$(INSTALL_DATA) hydra-module.nix $(DESTDIR)$(datadir)/nix/
+	$(INSTALL_DATA) nixos-modules/hydra.nix $(DESTDIR)$(datadir)/nix/hydra-module.nix
````
````diff
@@ -80,7 +80,7 @@ $ nix-build
 You can use the provided shell.nix to get a working development environment:
 ```
 $ nix-shell
-$ ./bootstrap
+$ autoreconfPhase
 $ configurePhase # NOTE: not ./configure
 $ make
 ```
@@ -140,7 +140,7 @@ You can also interface with Hydra through a JSON API. The API is defined in [hyd
 ## Additional Resources
 
 - [Hydra User's Guide](https://nixos.org/hydra/manual/)
-- [Hydra on the NixOS Wiki](https://nixos.wiki/wiki/Hydra)
+- [Hydra on the NixOS Wiki](https://wiki.nixos.org/wiki/Hydra)
 - [hydra-cli](https://github.com/nlewo/hydra-cli)
 - [Peter Simons - Hydra: Setting up your own build farm (NixOS)](https://www.youtube.com/watch?v=RXV0Y5Bn-QQ)
 
````
configure.ac: 20 lines changed

````diff
@@ -53,9 +53,6 @@ PKG_CHECK_MODULES([NIX], [nix-main nix-expr nix-store])
 testPath="$(dirname $(type -p expr))"
 AC_SUBST(testPath)
 
-jobsPath="$(realpath ./t/jobs)"
-AC_SUBST(jobsPath)
-
 CXXFLAGS+=" -include nix/config.h"
 
 AC_CONFIG_FILES([
@@ -71,11 +68,22 @@ AC_CONFIG_FILES([
   src/lib/Makefile
   src/root/Makefile
   src/script/Makefile
-  t/Makefile
-  t/jobs/config.nix
-  t/jobs/declarative/project.json
 ])
 
+# Tests might be filtered out
+AM_CONDITIONAL([CAN_DO_CHECK], [test -f "$srcdir/t/api-test.t"])
+AM_COND_IF(
+    [CAN_DO_CHECK],
+    [
+        jobsPath="$(realpath ./t/jobs)"
+        AC_SUBST(jobsPath)
+        AC_CONFIG_FILES([
+            t/Makefile
+            t/jobs/config.nix
+            t/jobs/declarative/project.json
+        ])
+    ])
+
 AC_CONFIG_COMMANDS([executable-scripts], [])
 
 AC_CONFIG_HEADER([hydra-config.h])
````
````diff
@@ -74,6 +74,30 @@ following:
   }
 }
 
+Populating a Cache
+------------------
+
+A common use for Hydra is to pre-build and cache derivations which
+take a long time to build. While it is possible to directly access the
+Hydra server's store over SSH, a more scalable option is to upload
+built derivations to a remote store like an [S3-compatible object
+store](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html#s3-binary-cache-store). Setting
+the `store_uri` parameter will cause Hydra to sign and upload
+derivations as they are built:
+
+```
+store_uri = s3://cache-bucket-name?compression=zstd&parallel-compression=true&write-nar-listing=1&ls-compression=br&log-compression=br&secret-key=/path/to/cache/private/key
+```
+
+This example uses [Zstandard](https://github.com/facebook/zstd)
+compression on derivations to reduce CPU usage on the server, but
+[Brotli](https://brotli.org/) compression for derivation listings and
+build logs because it has better browser support.
+
+See [`nix help
+stores`](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html)
+for a description of the store URI format.
+
 Statsd Configuration
 --------------------
 
@@ -184,7 +208,8 @@ Example configuration:
 <role_mapping>
   # Make all users in the hydra_admin group Hydra admins
   hydra_admin = admin
-  # Allow all users in the dev group to restart jobs and cancel builds
+  # Allow all users in the dev group to eval jobsets, restart jobs and cancel builds
+  dev = eval-jobset
   dev = restart-jobs
   dev = cancel-build
 </role_mapping>
````
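If Hydra is managed through the NixOS module that this changeset also touches, the same `store_uri` parameter can be passed in via `services.hydra-dev.extraConfig`. A minimal sketch, where the bucket name and signing-key path are hypothetical placeholders rather than part of this change:

````nix
{
  services.hydra-dev.extraConfig = ''
    # Placeholder bucket name and key path; substitute your own.
    store_uri = s3://example-hydra-cache?compression=zstd&parallel-compression=true&secret-key=/var/lib/hydra/cache-key.sec
  '';
}
````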
````diff
@@ -18,7 +18,7 @@ $ nix-shell
 To build Hydra, you should then do:
 
 ```console
-[nix-shell]$ ./bootstrap
+[nix-shell]$ autoreconfPhase
 [nix-shell]$ configurePhase
 [nix-shell]$ make
 ```
@@ -30,6 +30,8 @@ foreman:
 $ foreman start
 ```
 
+The Hydra interface will be available on port 63333, with an admin user named "alice" with password "foobar"
+
 You can run just the Hydra web server in your source tree as follows:
 
 ```console
````
````diff
@@ -42,7 +42,7 @@ Sets CircleCI status.
 
 ## Compress build logs
 
-Compresses build logs after a build with bzip2.
+Compresses build logs after a build with bzip2 or zstd.
 
 ### Configuration options
 
@@ -50,6 +50,14 @@ Compresses build logs after a build with bzip2.
 
   Enable log compression
 
+- `compress_build_logs_compression`
+
+  Which compression format to use. Valid values are bzip2 (default) and zstd.
+
+- `compress_build_logs_silent`
+
+  Whether to compress logs silently.
+
 ### Example
 
 ```xml
@@ -404,3 +404,10 @@ analogous:
 | `String value` | `gitea_status_repo` | *Name of the `Git checkout` input* |
 | `String value` | `gitea_http_url` | *Public URL of `gitea`*, optional |
 
+Content-addressed derivations
+-----------------------------
+
+Hydra can to a certain extent use the [`ca-derivations` experimental Nix feature](https://github.com/NixOS/rfcs/pull/62).
+To use it, make sure that the Nix version you use is at least as recent as the one used in hydra's flake.
+
+Be warned that this support is still highly experimental, and anything beyond the basic functionality might be broken at that point.
````
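To see how the new plugin options combine in practice, here is a minimal sketch of a hydra.conf fragment, again fed through the NixOS module's `extraConfig`. The option names come from the documentation added above; the specific values are illustrative:

````nix
{
  services.hydra-dev.extraConfig = ''
    compress_build_logs = 1
    compress_build_logs_compression = zstd
    compress_build_logs_silent = 1
  '';
}
````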
````diff
@@ -1,9 +1,12 @@
 # Webhooks
 
-Hydra can be notified by github's webhook to trigger a new evaluation when a
+Hydra can be notified by github or gitea with webhooks to trigger a new evaluation when a
 jobset has a github repo in its input.
-To set up a github webhook go to `https://github.com/<yourhandle>/<yourrepo>/settings` and in the `Webhooks` tab
-click on `Add webhook`.
+
+## GitHub
+
+To set up a webhook for a GitHub repository go to `https://github.com/<yourhandle>/<yourrepo>/settings`
+and in the `Webhooks` tab click on `Add webhook`.
 
 - In `Payload URL` fill in `https://<your-hydra-domain>/api/push-github`.
 - In `Content type` switch to `application/json`.
@@ -11,3 +14,14 @@ click on `Add webhook`.
 - For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`.
 
 Then add the hook with `Add webhook`.
+
+## Gitea
+
+To set up a webhook for a Gitea repository go to the settings of the repository in your Gitea instance
+and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop down.
+
+- In `Target URL` fill in `https://<your-hydra-domain>/api/push-gitea`.
+- Keep HTTP method `POST`, POST Content Type `application/json` and Trigger On `Push Events`.
+- Change the branch filter to match the git branch hydra builds.
+
+Then add the hook with `Add webhook`.
````
flake.lock (generated): 108 lines changed

````diff
@@ -16,58 +16,96 @@
         "type": "github"
       }
     },
-    "lowdown-src": {
-      "flake": false,
+    "flake-parts": {
+      "inputs": {
+        "nixpkgs-lib": [
+          "nix",
+          "nixpkgs"
+        ]
+      },
       "locked": {
-        "lastModified": 1633514407,
-        "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
-        "owner": "kristapsdz",
-        "repo": "lowdown",
-        "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
+        "lastModified": 1712014858,
+        "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
+        "owner": "hercules-ci",
+        "repo": "flake-parts",
+        "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
         "type": "github"
       },
       "original": {
-        "owner": "kristapsdz",
-        "repo": "lowdown",
+        "owner": "hercules-ci",
+        "repo": "flake-parts",
+        "type": "github"
+      }
+    },
+    "flake-utils": {
+      "locked": {
+        "lastModified": 1667395993,
+        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
+      }
+    },
+    "libgit2": {
+      "flake": false,
+      "locked": {
+        "lastModified": 1697646580,
+        "narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=",
+        "owner": "libgit2",
+        "repo": "libgit2",
+        "rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5",
+        "type": "github"
+      },
+      "original": {
+        "owner": "libgit2",
+        "repo": "libgit2",
         "type": "github"
       }
     },
     "nix": {
       "inputs": {
         "flake-compat": "flake-compat",
-        "lowdown-src": "lowdown-src",
+        "flake-parts": "flake-parts",
+        "libgit2": "libgit2",
         "nixpkgs": [
           "nixpkgs"
         ],
-        "nixpkgs-regression": "nixpkgs-regression"
+        "nixpkgs-regression": "nixpkgs-regression",
+        "pre-commit-hooks": "pre-commit-hooks"
       },
       "locked": {
-        "lastModified": 1690219894,
-        "narHash": "sha256-QMYAkdtU+g9HlZKtoJ+AI6TbWzzovKGnPZJHfZdclc8=",
+        "lastModified": 1713874370,
+        "narHash": "sha256-gW1mO/CvsQQ5gvgiwzxsGhPFI/tx30NING+qgF5Do0s=",
         "owner": "NixOS",
         "repo": "nix",
-        "rev": "a212300a1d9f9c7b0daf19c00c87fc50480f54f4",
+        "rev": "1c8150ac312b5f9ba1b3f6768ff43b09867e5883",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
-        "ref": "2.17.0",
+        "ref": "2.22-maintenance",
         "repo": "nix",
         "type": "github"
       }
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1687379288,
-        "narHash": "sha256-cSuwfiqYfeVyqzCRkU9AvLTysmEuSal8nh6CYr+xWog=",
+        "lastModified": 1712848736,
+        "narHash": "sha256-CzZwhqyLlebljv1zFS2KWVH/3byHND0LfaO1jKsGuVo=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "ef0bc3976340dab9a4e087a0bcff661a8b2e87f3",
+        "rev": "1d6a23f11e44d0fb64b3237569b87658a9eb5643",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
-        "ref": "nixos-23.05",
+        "ref": "nixos-23.11-small",
         "repo": "nixpkgs",
         "type": "github"
       }
     }
@@ -88,6 +126,38 @@
         "type": "github"
       }
     },
+    "pre-commit-hooks": {
+      "inputs": {
+        "flake-compat": [
+          "nix"
+        ],
+        "flake-utils": "flake-utils",
+        "gitignore": [
+          "nix"
+        ],
+        "nixpkgs": [
+          "nix",
+          "nixpkgs"
+        ],
+        "nixpkgs-stable": [
+          "nix",
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1712897695,
+        "narHash": "sha256-nMirxrGteNAl9sWiOhoN5tIHyjBbVi5e2tgZUgZlK3Y=",
+        "owner": "cachix",
+        "repo": "pre-commit-hooks.nix",
+        "rev": "40e6053ecb65fcbf12863338a6dcefb3f55f1bf8",
+        "type": "github"
+      },
+      "original": {
+        "owner": "cachix",
+        "repo": "pre-commit-hooks.nix",
+        "type": "github"
+      }
+    },
     "root": {
       "inputs": {
         "nix": "nix",
````
flake.nix: 582 lines changed

````diff
@@ -1,257 +1,30 @@
 {
   description = "A Nix-based continuous build system";
 
-  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.05";
-  inputs.nix.url = "github:NixOS/nix/2.17.0";
+  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.11-small";
+  inputs.nix.url = "github:NixOS/nix/2.22-maintenance";
   inputs.nix.inputs.nixpkgs.follows = "nixpkgs";
 
   outputs = { self, nixpkgs, nix }:
     let
-      version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (self.lastModifiedDate or "19700101")}.${self.shortRev or "DIRTY"}";
-
       systems = [ "x86_64-linux" "aarch64-linux" ];
       forEachSystem = nixpkgs.lib.genAttrs systems;
 
+      overlayList = [ self.overlays.default nix.overlays.default ];
+
       pkgsBySystem = forEachSystem (system: import nixpkgs {
         inherit system;
-        overlays = [ self.overlays.default nix.overlays.default ];
+        overlays = overlayList;
       });
 
-      # NixOS configuration used for VM tests.
-      hydraServer =
-        { config, pkgs, ... }:
-        {
-          imports = [ self.nixosModules.hydraTest ];
-
-          virtualisation.memorySize = 1024;
-          virtualisation.writableStore = true;
-
-          environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
-
-          nix = {
-            # Without this nix tries to fetch packages from the default
-            # cache.nixos.org which is not reachable from this sandboxed NixOS test.
-            binaryCaches = [ ];
-          };
-        };
-
     in
     rec {
 
       # A Nixpkgs overlay that provides a 'hydra' package.
       overlays.default = final: prev: {
-        # Add LDAP dependencies that aren't currently found within nixpkgs.
-        perlPackages = prev.perlPackages // {
-
-          PrometheusTiny = final.perlPackages.buildPerlPackage {
-            pname = "Prometheus-Tiny";
-            version = "0.007";
-            src = final.fetchurl {
-              url = "mirror://cpan/authors/id/R/RO/ROBN/Prometheus-Tiny-0.007.tar.gz";
-              sha256 = "0ef8b226a2025cdde4df80129dd319aa29e884e653c17dc96f4823d985c028ec";
-            };
-            buildInputs = with final.perlPackages; [ HTTPMessage Plack TestException ];
-            meta = {
-              homepage = "https://github.com/robn/Prometheus-Tiny";
-              description = "A tiny Prometheus client";
-              license = with final.lib.licenses; [ artistic1 gpl1Plus ];
-            };
-          };
-
-        };
-
-        hydra = with final; let
-          perlDeps = buildEnv {
-            name = "hydra-perl-deps";
-            paths = with perlPackages; lib.closePropagation
-              [
-                AuthenSASL
-                CatalystActionREST
-                CatalystAuthenticationStoreDBIxClass
-                CatalystAuthenticationStoreLDAP
-                CatalystDevel
-                CatalystPluginAccessLog
-                CatalystPluginAuthorizationRoles
-                CatalystPluginCaptcha
-                CatalystPluginPrometheusTiny
-                CatalystPluginSessionStateCookie
-                CatalystPluginSessionStoreFastMmap
-                CatalystPluginStackTrace
-                CatalystTraitForRequestProxyBase
-                CatalystViewDownload
-                CatalystViewJSON
-                CatalystViewTT
-                CatalystXRoleApplicator
-                CatalystXScriptServerStarman
-                CryptPassphrase
-                CryptPassphraseArgon2
-                CryptRandPasswd
-                DataDump
-                DateTime
-                DBDPg
-                DBDSQLite
-                DigestSHA1
-                EmailMIME
-                EmailSender
-                FileLibMagic
-                FileSlurper
-                FileWhich
-                final.nix.perl-bindings
-                git
-                IOCompress
-                IPCRun
-                IPCRun3
-                JSON
-                JSONMaybeXS
-                JSONXS
-                ListSomeUtils
-                LWP
-                LWPProtocolHttps
-                ModulePluggable
-                NetAmazonS3
-                NetPrometheus
-                NetStatsd
-                PadWalker
-                ParallelForkManager
-                PerlCriticCommunity
-                PrometheusTinyShared
-                ReadonlyX
-                SetScalar
-                SQLSplitStatement
-                Starman
-                StringCompareConstantTime
-                SysHostnameLong
-                TermSizeAny
-                TermReadKey
-                Test2Harness
-                TestPostgreSQL
-                TextDiff
-                TextTable
-                UUID4Tiny
-                YAML
-                XMLSimple
-              ];
-          };
-
-        in
-        stdenv.mkDerivation {
-
-          name = "hydra-${version}";
-
-          src = self;
-
-          buildInputs =
-            [
-              makeWrapper
-              autoconf
-              automake
-              libtool
-              unzip
-              nukeReferences
-              pkg-config
-              libpqxx
-              top-git
-              mercurial
-              darcs
-              subversion
-              breezy
-              openssl
-              bzip2
-              libxslt
-              final.nix
-              perlDeps
-              perl
-              mdbook
-              pixz
-              boost
-              postgresql_13
-              (if lib.versionAtLeast lib.version "20.03pre"
-              then nlohmann_json
-              else nlohmann_json.override { multipleHeaders = true; })
-              prometheus-cpp
-            ];
-
-          checkInputs = [
-            cacert
-            foreman
-            glibcLocales
-            libressl.nc
-            openldap
-            python3
-          ];
-
-          hydraPath = lib.makeBinPath (
-            [
-              subversion
-              openssh
-              final.nix
-              coreutils
-              findutils
-              pixz
-              gzip
-              bzip2
-              xz
-              gnutar
-              unzip
-              git
-              top-git
-              mercurial
-              darcs
-              gnused
-              breezy
-            ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
-          );
-
-          OPENLDAP_ROOT = openldap;
-
-          shellHook = ''
-            pushd $(git rev-parse --show-toplevel) >/dev/null
-
-            PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH
-            PERL5LIB=$(pwd)/src/lib:$PERL5LIB
-            export HYDRA_HOME="$(pwd)/src/"
-            mkdir -p .hydra-data
-            export HYDRA_DATA="$(pwd)/.hydra-data"
-            export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'
-
-            popd >/dev/null
-          '';
-
-          preConfigure = "autoreconf -vfi";
-
-          NIX_LDFLAGS = [ "-lpthread" ];
-
-          enableParallelBuilding = true;
-
-          doCheck = true;
-
-          preCheck = ''
-            patchShebangs .
-            export LOGNAME=''${LOGNAME:-foo}
-            # set $HOME for bzr so it can create its trace file
-            export HOME=$(mktemp -d)
-          '';
-
-          postInstall = ''
-            mkdir -p $out/nix-support
-
-            for i in $out/bin/*; do
-              read -n 4 chars < $i
-              if [[ $chars =~ ELF ]]; then continue; fi
-              wrapProgram $i \
-                --prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
-                --prefix PATH ':' $out/bin:$hydraPath \
-                --set HYDRA_RELEASE ${version} \
-                --set HYDRA_HOME $out/libexec/hydra \
-                --set NIX_RELEASE ${final.nix.name or "unknown"}
-            done
-          '';
-
-          dontStrip = true;
-
-          meta.description = "Build of Hydra on ${final.stdenv.system}";
-          passthru = { inherit perlDeps; inherit (final) nix; };
+        hydra = final.callPackage ./package.nix {
+          inherit (nixpkgs.lib) fileset;
+          rawSrc = self;
         };
       };
 
@@ -259,9 +32,15 @@
 
       build = forEachSystem (system: packages.${system}.hydra);
 
+      buildNoTests = forEachSystem (system:
+        packages.${system}.hydra.overrideAttrs (_: {
+          doCheck = false;
+        })
+      );
+
       manual = forEachSystem (system:
         let pkgs = pkgsBySystem.${system}; in
-        pkgs.runCommand "hydra-manual-${version}" { }
+        pkgs.runCommand "hydra-manual-${pkgs.hydra.version}" { }
           ''
             mkdir -p $out/share
             cp -prvd ${pkgs.hydra}/share/doc $out/share/
@@ -270,286 +49,9 @@
             echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
           '');
 
-      tests.install = forEachSystem (system:
-        with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
-        simpleTest {
-          name = "hydra-install";
-          nodes.machine = hydraServer;
-          testScript =
-            ''
-              machine.wait_for_job("hydra-init")
-              machine.wait_for_job("hydra-server")
-              machine.wait_for_job("hydra-evaluator")
-              machine.wait_for_job("hydra-queue-runner")
-              machine.wait_for_open_port(3000)
-              machine.succeed("curl --fail http://localhost:3000/")
-            '';
-        });
-
-      tests.notifications = forEachSystem (system:
-        let pkgs = pkgsBySystem.${system}; in
-        with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
-        simpleTest {
-          name = "hydra-notifications";
-          nodes.machine = { pkgs, ... }: {
-            imports = [ hydraServer ];
-            services.hydra-dev.extraConfig = ''
-              <influxdb>
-                url = http://127.0.0.1:8086
-                db = hydra
-              </influxdb>
-            '';
-            services.influxdb.enable = true;
-          };
-          testScript = ''
-            machine.wait_for_job("hydra-init")
-
-            # Create an admin account and some other state.
-            machine.succeed(
-                """
-                su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
-                mkdir /run/jobset
-                chmod 755 /run/jobset
-                cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
-                chmod 644 /run/jobset/default.nix
-                chown -R hydra /run/jobset
-                """
-            )
-
-            # Wait until InfluxDB can receive web requests
-            machine.wait_for_job("influxdb")
-            machine.wait_for_open_port(8086)
-
-            # Create an InfluxDB database where hydra will write to
-            machine.succeed(
-                "curl -XPOST 'http://127.0.0.1:8086/query' "
-                + "--data-urlencode 'q=CREATE DATABASE hydra'"
-            )
-
-            # Wait until hydra-server can receive HTTP requests
-            machine.wait_for_job("hydra-server")
-            machine.wait_for_open_port(3000)
-
-            # Setup the project and jobset
-            machine.succeed(
-                "su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
-            )
-
-            # Wait until hydra has build the job and
-            # the InfluxDBNotification plugin uploaded its notification to InfluxDB
-            machine.wait_until_succeeds(
-                "curl -s -H 'Accept: application/csv' "
-                + "-G 'http://127.0.0.1:8086/query?db=hydra' "
-                + "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
-            )
-          '';
-        });
-
-      tests.gitea = forEachSystem (system:
-        let pkgs = pkgsBySystem.${system}; in
-        with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
-        makeTest {
-          name = "hydra-gitea";
-          nodes.machine = { pkgs, ... }: {
-            imports = [ hydraServer ];
-            services.hydra-dev.extraConfig = ''
-              <gitea_authorization>
-              root=d7f16a3412e01a43a414535b16007c6931d3a9c7
-              </gitea_authorization>
-            '';
-            nix = {
-              distributedBuilds = true;
-              buildMachines = [{
-                hostName = "localhost";
-                systems = [ system ];
-              }];
-              binaryCaches = [ ];
-            };
-            services.gitea = {
-              enable = true;
-              database.type = "postgres";
-              disableRegistration = true;
-              httpPort = 3001;
-            };
-            services.openssh.enable = true;
-            environment.systemPackages = with pkgs; [ gitea git jq gawk ];
-            networking.firewall.allowedTCPPorts = [ 3000 ];
-          };
-          skipLint = true;
-          testScript =
-            let
-              scripts.mktoken = pkgs.writeText "token.sql" ''
-                INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7');
-              '';
-
-              scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
-                set -x
-                mkdir -p /tmp/repo $HOME/.ssh
-                cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
-                chmod 0400 $HOME/.ssh/privk
-                git -C /tmp/repo init
-                cp ${smallDrv} /tmp/repo/jobset.nix
-                git -C /tmp/repo add .
-                git config --global user.email test@localhost
-                git config --global user.name test
-                git -C /tmp/repo commit -m 'Initial import'
-                git -C /tmp/repo remote add origin gitea@machine:root/repo
-                GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
-                  git -C /tmp/repo push origin master
-                git -C /tmp/repo log >&2
-              '';
-
-              scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
-                set -x
-                su -l hydra -c "hydra-create-user root --email-address \
-                  'alice@example.org' --password foobar --role admin"
-
-                URL=http://localhost:3000
-                USERNAME="root"
-                PASSWORD="foobar"
-                PROJECT_NAME="trivial"
-                JOBSET_NAME="trivial"
-                mycurl() {
-                  curl --referer $URL -H "Accept: application/json" \
-                    -H "Content-Type: application/json" $@
-                }
-
-                cat >data.json <<EOF
-                { "username": "$USERNAME", "password": "$PASSWORD" }
-                EOF
-                mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt
-
-                cat >data.json <<EOF
-                {
-                  "displayname":"Trivial",
-                  "enabled":"1",
-                  "visible":"1"
-                }
-                EOF
-                mycurl --silent -X PUT $URL/project/$PROJECT_NAME \
-                  -d @data.json -b hydra-cookie.txt
-
-                cat >data.json <<EOF
-                {
-                  "description": "Trivial",
-                  "checkinterval": "60",
-                  "enabled": "1",
-                  "visible": "1",
-                  "keepnr": "1",
-                  "enableemail": true,
-                  "emailoverride": "hydra@localhost",
-                  "type": 0,
-                  "nixexprinput": "git",
-                  "nixexprpath": "jobset.nix",
-                  "inputs": {
-                    "git": {"value": "http://localhost:3001/root/repo.git", "type": "git"},
-                    "gitea_repo_name": {"value": "repo", "type": "string"},
-                    "gitea_repo_owner": {"value": "root", "type": "string"},
-                    "gitea_status_repo": {"value": "git", "type": "string"},
-                    "gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
-                  }
-                }
-                EOF
-
-                mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME \
-                  -d @data.json -b hydra-cookie.txt
-              '';
-
-              api_token = "d7f16a3412e01a43a414535b16007c6931d3a9c7";
-
-              snakeoilKeypair = {
-                privkey = pkgs.writeText "privkey.snakeoil" ''
-                  -----BEGIN EC PRIVATE KEY-----
-                  MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
-                  AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
-                  r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
-                  -----END EC PRIVATE KEY-----
-                '';
-
-                pubkey = pkgs.lib.concatStrings [
-                  "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
-                  "yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
-                  "9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= sakeoil"
-                ];
-              };
-
-              smallDrv = pkgs.writeText "jobset.nix" ''
-                { trivial = builtins.derivation {
-                    name = "trivial";
-                    system = "${system}";
-                    builder = "/bin/sh";
-                    allowSubstitutes = false;
-                    preferLocalBuild = true;
-                    args = ["-c" "echo success > $out; exit 0"];
-                  };
-                }
-              '';
-            in
-            ''
-              import json
-
-              machine.start()
-              machine.wait_for_unit("multi-user.target")
-              machine.wait_for_open_port(3000)
-              machine.wait_for_open_port(3001)
-
-              machine.succeed(
-                  "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
-                  + "--username root --password root --email test@localhost'"
-              )
-              machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")
-
-              machine.succeed(
-                  "curl --fail -X POST http://localhost:3001/api/v1/user/repos "
-                  + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
-                  + f"-H 'Authorization: token ${api_token}'"
-                  + ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
-              )
-
-              machine.succeed(
-                  "curl --fail -X POST http://localhost:3001/api/v1/user/keys "
-                  + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
-                  + f"-H 'Authorization: token ${api_token}'"
-                  + ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
-              )
-
-              machine.succeed(
-                  "${scripts.git-setup}"
-              )
-
-              machine.succeed(
-                  "${scripts.hydra-setup}"
-              )
-
-              machine.wait_until_succeeds(
-                  'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
-                  + '| jq .buildstatus | xargs test 0 -eq'
-              )
-
-              data = machine.succeed(
-                  'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
-                  + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
-                  + f"-H 'Authorization: token ${api_token}'"
-              )
-
-              response = json.loads(data)
-
-              assert len(response) == 2, "Expected exactly two status updates for latest commit!"
-              assert response[0]['status'] == "success", "Expected latest status to be success!"
-              assert response[1]['status'] == "pending", "Expected first status to be pending!"
-
-              machine.shutdown()
-            '';
-        });
-
-      tests.validate-openapi = forEachSystem (system:
-        let pkgs = pkgsBySystem.${system}; in
-        pkgs.runCommand "validate-openapi"
-          { buildInputs = [ pkgs.openapi-generator-cli ]; }
-          ''
-            openapi-generator-cli validate -i ${./hydra-api.yaml}
-            touch $out
-          '');
+      tests = import ./nixos-tests.nix {
+        inherit forEachSystem nixpkgs pkgsBySystem nixosModules;
+      };
 
       container = nixosConfigurations.container.config.system.build.toplevel;
     };
@@ -565,56 +67,16 @@
       default = pkgsBySystem.${system}.hydra;
     });
 
-    nixosModules.hydra = {
-      imports = [ ./hydra-module.nix ];
-      nixpkgs.overlays = [ self.overlays.default nix.overlays.default ];
-    };
-
-    nixosModules.hydraTest = { pkgs, ... }: {
-      imports = [ self.nixosModules.hydra ];
-
-      services.hydra-dev.enable = true;
-      services.hydra-dev.hydraURL = "http://hydra.example.org";
-      services.hydra-dev.notificationSender = "admin@hydra.example.org";
-
-      systemd.services.hydra-send-stats.enable = false;
-
-      services.postgresql.enable = true;
-      services.postgresql.package = pkgs.postgresql_11;
-
-      # The following is to work around the following error from hydra-server:
-      # [error] Caught exception in engine "Cannot determine local time zone"
-      time.timeZone = "UTC";
-
-      nix.extraOptions = ''
-        allowed-uris = https://github.com/
-      '';
-    };
-
-    nixosModules.hydraProxy = {
-      services.httpd = {
-        enable = true;
-        adminAddr = "hydra-admin@example.org";
-        extraConfig = ''
-          <Proxy *>
-            Order deny,allow
-            Allow from all
-          </Proxy>
-
-          ProxyRequests Off
-          ProxyPreserveHost On
-          ProxyPass /apache-errors !
-          ErrorDocument 503 /apache-errors/503.html
-          ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
-          ProxyPassReverse / http://127.0.0.1:3000/
-        '';
-      };
+    nixosModules = import ./nixos-modules {
+      overlays = overlayList;
     };
 
     nixosConfigurations.container = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules =
        [
+          self.nixosModules.hydra
+          self.nixosModules.overlayNixpkgsForThisHydra
          self.nixosModules.hydraTest
          self.nixosModules.hydraProxy
          {
````
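The `buildNoTests` output added above is the standard `overrideAttrs` pattern, so a downstream expression can apply the same trick itself. A minimal sketch, where the binding names are illustrative rather than taken from this change:

````nix
let
  # `hydraPkg` stands for the hydra package provided by this flake's overlay.
  hydraNoTests = hydraPkg.overrideAttrs (_: {
    doCheck = false; # skip the slow test suite for faster iteration
  });
in
hydraNoTests
````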
````diff
@@ -70,7 +70,7 @@ paths:
               $ref: '#/components/examples/projects-success'
 
   /api/push:
-    put:
+    post:
       summary: trigger jobsets
       parameters:
       - in: query
````
nixos-modules/default.nix (new file): 49 lines

````nix
{ overlays }:

{
  hydra = import ./hydra.nix;

  overlayNixpkgsForThisHydra = { pkgs, ... }: {
    nixpkgs = { inherit overlays; };
    services.hydra.package = pkgs.hydra;
  };

  hydraTest = { pkgs, ... }: {
    services.hydra-dev.enable = true;
    services.hydra-dev.hydraURL = "http://hydra.example.org";
    services.hydra-dev.notificationSender = "admin@hydra.example.org";

    systemd.services.hydra-send-stats.enable = false;

    services.postgresql.enable = true;
    services.postgresql.package = pkgs.postgresql_12;

    # The following is to work around the following error from hydra-server:
    # [error] Caught exception in engine "Cannot determine local time zone"
    time.timeZone = "UTC";

    nix.extraOptions = ''
      allowed-uris = https://github.com/
    '';
  };

  hydraProxy = {
    services.httpd = {
      enable = true;
      adminAddr = "hydra-admin@example.org";
      extraConfig = ''
        <Proxy *>
          Order deny,allow
          Allow from all
        </Proxy>

        ProxyRequests Off
        ProxyPreserveHost On
        ProxyPass /apache-errors !
        ErrorDocument 503 /apache-errors/503.html
        ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
        ProxyPassReverse / http://127.0.0.1:3000/
      '';
    };
  };
}
````
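A downstream flake can now pull these modules in directly instead of recreating them. A minimal consumer sketch; the input URL and host name are assumptions for illustration, not part of this change:

````nix
{
  inputs.hydra.url = "github:NixOS/hydra";

  outputs = { nixpkgs, hydra, ... }: {
    nixosConfigurations.ci-server = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        # The hydra service module plus the overlay that pins pkgs.hydra
        # to the package built from this repository.
        hydra.nixosModules.hydra
        hydra.nixosModules.overlayNixpkgsForThisHydra
      ];
    };
  };
}
````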
````diff
@@ -68,7 +68,7 @@ in
 
     package = mkOption {
       type = types.path;
-      default = pkgs.hydra;
+      default = pkgs.hydra_unstable;
       defaultText = literalExpression "pkgs.hydra";
       description = "The Hydra package.";
     };
@@ -233,7 +233,7 @@ in
       gc-keep-outputs = true;
       gc-keep-derivations = true;
     };
 
     services.hydra-dev.extraConfig =
       ''
         using_frontend_proxy = 1
@@ -408,6 +408,7 @@ in
       requires = [ "hydra-init.service" ];
       after = [ "hydra-init.service" ];
       restartTriggers = [ hydraConf ];
+      path = [ pkgs.zstd ];
       environment = env // {
         PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
         HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify";
@@ -458,10 +459,15 @@ in
       # logs automatically after a step finishes, but this doesn't work
       # if the queue runner is stopped prematurely.
       systemd.services.hydra-compress-logs =
-        { path = [ pkgs.bzip2 ];
+        { path = [ pkgs.bzip2 pkgs.zstd ];
           script =
             ''
-              find ${baseDir}/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r bzip2 -v -f
+              set -eou pipefail
+              compression=$(sed -nr 's/compress_build_logs_compression = ()/\1/p' ${baseDir}/hydra.conf)
+              if [[ $compression == zstd ]]; then
+                compression="zstd --rm"
+              fi
+              find ${baseDir}/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r $compression --force --quiet
             '';
           startAt = "Sun 01:45";
         };
````
309
nixos-tests.nix
Normal file
309
nixos-tests.nix
Normal file
@@ -0,0 +1,309 @@
|
|||||||
|
{ forEachSystem, nixpkgs, pkgsBySystem, nixosModules }:
|
||||||
|
|
||||||
|
let
|
||||||
|
# NixOS configuration used for VM tests.
|
||||||
|
hydraServer =
|
||||||
|
{ config, pkgs, ... }:
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
nixosModules.hydra
|
||||||
|
nixosModules.overlayNixpkgsForThisHydra
|
||||||
|
nixosModules.hydraTest
|
||||||
|
];
|
||||||
|
|
||||||
|
virtualisation.memorySize = 1024;
|
||||||
|
virtualisation.writableStore = true;
|
||||||
|
|
||||||
|
environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
|
||||||
|
|
||||||
|
nix = {
|
||||||
|
# Without this nix tries to fetch packages from the default
|
||||||
|
# cache.nixos.org which is not reachable from this sandboxed NixOS test.
|
||||||
|
settings.substituters = [ ];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
in
|
||||||
|
|
||||||
|
{
|
||||||
|
|
||||||
|
install = forEachSystem (system:
|
||||||
|
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
|
||||||
|
simpleTest {
|
||||||
|
name = "hydra-install";
|
||||||
|
nodes.machine = hydraServer;
|
||||||
|
testScript =
|
||||||
|
''
|
||||||
|
machine.wait_for_job("hydra-init")
|
||||||
|
machine.wait_for_job("hydra-server")
|
||||||
|
machine.wait_for_job("hydra-evaluator")
|
||||||
|
machine.wait_for_job("hydra-queue-runner")
|
||||||
|
machine.wait_for_open_port(3000)
|
||||||
|
machine.succeed("curl --fail http://localhost:3000/")
|
||||||
|
'';
|
||||||
|
});
|
||||||
|
|
||||||
|
notifications = forEachSystem (system:
|
||||||
|
let pkgs = pkgsBySystem.${system}; in
|
||||||
|
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
|
||||||
|
simpleTest {
|
||||||
|
name = "hydra-notifications";
|
||||||
|
nodes.machine = { pkgs, ... }: {
|
||||||
|
imports = [ hydraServer ];
|
||||||
|
services.hydra-dev.extraConfig = ''
|
||||||
|
<influxdb>
|
||||||
|
url = http://127.0.0.1:8086
|
||||||
|
db = hydra
|
||||||
|
</influxdb>
|
||||||
|
'';
|
||||||
|
services.influxdb.enable = true;
|
||||||
|
};
|
||||||
|
testScript = ''
|
||||||
|
machine.wait_for_job("hydra-init")
|
||||||
|
|
||||||
|
# Create an admin account and some other state.
|
||||||
|
machine.succeed(
|
||||||
|
"""
|
||||||
|
su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
|
||||||
|
mkdir /run/jobset
|
||||||
|
chmod 755 /run/jobset
|
||||||
|
cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
|
||||||
|
chmod 644 /run/jobset/default.nix
|
||||||
|
chown -R hydra /run/jobset
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
|
||||||
|
# Wait until InfluxDB can receive web requests
|
||||||
|
machine.wait_for_job("influxdb")
|
||||||
|
machine.wait_for_open_port(8086)
|
||||||
|
|
||||||
|
# Create an InfluxDB database where hydra will write to
|
||||||
|
machine.succeed(
|
||||||
|
"curl -XPOST 'http://127.0.0.1:8086/query' "
|
||||||
|
+ "--data-urlencode 'q=CREATE DATABASE hydra'"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Wait until hydra-server can receive HTTP requests
|
||||||
|
machine.wait_for_job("hydra-server")
|
||||||
|
machine.wait_for_open_port(3000)
|
||||||
|
|
||||||
|
# Setup the project and jobset
|
||||||
|
machine.succeed(
|
||||||
|
"su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Wait until hydra has build the job and
|
||||||
|
# the InfluxDBNotification plugin uploaded its notification to InfluxDB
|
||||||
|
machine.wait_until_succeeds(
|
||||||
|
"curl -s -H 'Accept: application/csv' "
|
||||||
|
+ "-G 'http://127.0.0.1:8086/query?db=hydra' "
|
||||||
|
+ "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
|
||||||
|
)
|
||||||
|
'';
|
||||||
|
});
|
||||||
|
|
||||||
|
gitea = forEachSystem (system:
|
||||||
|
let pkgs = pkgsBySystem.${system}; in
|
||||||
|
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
|
||||||
|
makeTest {
|
||||||
|
name = "hydra-gitea";
|
||||||
|
nodes.machine = { pkgs, ... }: {
|
||||||
|
imports = [ hydraServer ];
|
||||||
|
services.hydra-dev.extraConfig = ''
|
||||||
|
<gitea_authorization>
|
||||||
|
root=d7f16a3412e01a43a414535b16007c6931d3a9c7
|
||||||
|
</gitea_authorization>
|
||||||
|
'';
|
||||||
|
nixpkgs.config.permittedInsecurePackages = [ "gitea-1.19.4" ];
|
||||||
|
nix = {
|
||||||
|
settings.substituters = [ ];
|
||||||
|
};
|
||||||
|
services.gitea = {
|
||||||
|
enable = true;
|
||||||
|
database.type = "postgres";
|
||||||
|
settings = {
|
||||||
|
service.DISABLE_REGISTRATION = true;
|
||||||
|
server.HTTP_PORT = 3001;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
services.openssh.enable = true;
|
||||||
|
environment.systemPackages = with pkgs; [ gitea git jq gawk ];
|
||||||
|
networking.firewall.allowedTCPPorts = [ 3000 ];
|
||||||
|
};
|
||||||
|
skipLint = true;
|
||||||
|
testScript =
|
||||||
|
let
|
||||||
|
scripts.mktoken = pkgs.writeText "token.sql" ''
|
||||||
|
INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight, scope) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7', 'all');
|
||||||
|
'';
|
||||||
|
|
||||||
|
scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
|
||||||
|
set -x
|
||||||
|
mkdir -p /tmp/repo $HOME/.ssh
|
||||||
|
cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
|
||||||
|
chmod 0400 $HOME/.ssh/privk
|
||||||
|
git -C /tmp/repo init
|
||||||
|
cp ${smallDrv} /tmp/repo/jobset.nix
|
||||||
|
git -C /tmp/repo add .
|
||||||
|
git config --global user.email test@localhost
|
||||||
|
git config --global user.name test
|
||||||
|
git -C /tmp/repo commit -m 'Initial import'
|
||||||
|
git -C /tmp/repo remote add origin gitea@machine:root/repo
|
||||||
|
GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
|
||||||
|
git -C /tmp/repo push origin master
|
||||||
|
git -C /tmp/repo log >&2
|
||||||
|
'';
|
||||||
|
|
||||||
|
scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
|
||||||
|
set -x
|
||||||
|
su -l hydra -c "hydra-create-user root --email-address \
|
||||||
|
'alice@example.org' --password foobar --role admin"
|
||||||
|
|
||||||
|
URL=http://localhost:3000
|
||||||
|
USERNAME="root"
PASSWORD="foobar"
PROJECT_NAME="trivial"
JOBSET_NAME="trivial"
mycurl() {
  curl --referer $URL -H "Accept: application/json" \
    -H "Content-Type: application/json" $@
}

cat >data.json <<EOF
{ "username": "$USERNAME", "password": "$PASSWORD" }
EOF
mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt

cat >data.json <<EOF
{
  "displayname":"Trivial",
  "enabled":"1",
  "visible":"1"
}
EOF
mycurl --silent -X PUT $URL/project/$PROJECT_NAME \
  -d @data.json -b hydra-cookie.txt

cat >data.json <<EOF
{
  "description": "Trivial",
  "checkinterval": "60",
  "enabled": "1",
  "visible": "1",
  "keepnr": "1",
  "enableemail": true,
  "emailoverride": "hydra@localhost",
  "type": 0,
  "nixexprinput": "git",
  "nixexprpath": "jobset.nix",
  "inputs": {
    "git": {"value": "http://localhost:3001/root/repo.git", "type": "git"},
    "gitea_repo_name": {"value": "repo", "type": "string"},
    "gitea_repo_owner": {"value": "root", "type": "string"},
    "gitea_status_repo": {"value": "git", "type": "string"},
    "gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
  }
}
EOF

mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME \
  -d @data.json -b hydra-cookie.txt
'';

api_token = "d7f16a3412e01a43a414535b16007c6931d3a9c7";

snakeoilKeypair = {
  privkey = pkgs.writeText "privkey.snakeoil" ''
    -----BEGIN EC PRIVATE KEY-----
    MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
    AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
    r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
    -----END EC PRIVATE KEY-----
  '';

  pubkey = pkgs.lib.concatStrings [
    "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
    "yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
    "9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= sakeoil"
  ];
};

smallDrv = pkgs.writeText "jobset.nix" ''
  { trivial = builtins.derivation {
      name = "trivial";
      system = "${system}";
      builder = "/bin/sh";
      allowSubstitutes = false;
      preferLocalBuild = true;
      args = ["-c" "echo success > $out; exit 0"];
    };
  }
'';
in
''
import json

machine.start()
machine.wait_for_unit("multi-user.target")
machine.wait_for_open_port(3000)
machine.wait_for_open_port(3001)

machine.succeed(
    "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
    + "--username root --password root --email test@localhost'"
)
machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")

machine.succeed(
    "curl --fail -X POST http://localhost:3001/api/v1/user/repos "
    + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
    + f"-H 'Authorization: token ${api_token}'"
    + ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
)

machine.succeed(
    "curl --fail -X POST http://localhost:3001/api/v1/user/keys "
    + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
    + f"-H 'Authorization: token ${api_token}'"
    + ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
)

machine.succeed(
    "${scripts.git-setup}"
)

machine.succeed(
    "${scripts.hydra-setup}"
)

machine.wait_until_succeeds(
    'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
    + '| jq .buildstatus | xargs test 0 -eq'
)

data = machine.succeed(
    'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
    + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
    + f"-H 'Authorization: token ${api_token}'"
)

response = json.loads(data)
assert len(response) == 2, "Expected exactly two status updates for latest commit (queued, finished)!"
assert response[0]['status'] == "success", "Expected finished status to be success!"
assert response[1]['status'] == "pending", "Expected queued status to be pending!"
machine.shutdown()
'';
});

validate-openapi = forEachSystem (system:
  let pkgs = pkgsBySystem.${system}; in
  pkgs.runCommand "validate-openapi"
    { buildInputs = [ pkgs.openapi-generator-cli ]; }
    ''
      openapi-generator-cli validate -i ${./hydra-api.yaml}
      touch $out
    '');

}
272 package.nix Normal file
@@ -0,0 +1,272 @@
{ stdenv
, lib
, fileset

, rawSrc

, buildEnv

, perlPackages

, nix
, git

, makeWrapper
, autoreconfHook
, nukeReferences
, pkg-config
, mdbook

, unzip
, libpqxx
, top-git
, mercurial
, darcs
, subversion
, breezy
, openssl
, bzip2
, libxslt
, perl
, pixz
, boost
, postgresql_13
, nlohmann_json
, prometheus-cpp

, cacert
, foreman
, glibcLocales
, libressl
, openldap
, python3

, openssh
, coreutils
, findutils
, gzip
, xz
, gnutar
, gnused

, rpm
, dpkg
, cdrkit
}:

let
  perlDeps = buildEnv {
    name = "hydra-perl-deps";
    paths = lib.closePropagation
      ([
        nix.perl-bindings
        git
      ] ++ (with perlPackages; [
        AuthenSASL
        CatalystActionREST
        CatalystAuthenticationStoreDBIxClass
        CatalystAuthenticationStoreLDAP
        CatalystDevel
        CatalystPluginAccessLog
        CatalystPluginAuthorizationRoles
        CatalystPluginCaptcha
        CatalystPluginPrometheusTiny
        CatalystPluginSessionStateCookie
        CatalystPluginSessionStoreFastMmap
        CatalystPluginStackTrace
        CatalystTraitForRequestProxyBase
        CatalystViewDownload
        CatalystViewJSON
        CatalystViewTT
        CatalystXRoleApplicator
        CatalystXScriptServerStarman
        CryptPassphrase
        CryptPassphraseArgon2
        CryptRandPasswd
        DataDump
        DateTime
        DBDPg
        DBDSQLite
        DigestSHA1
        EmailMIME
        EmailSender
        FileLibMagic
        FileSlurper
        FileWhich
        IOCompress
        IPCRun
        IPCRun3
        JSON
        JSONMaybeXS
        JSONXS
        ListSomeUtils
        LWP
        LWPProtocolHttps
        ModulePluggable
        NetAmazonS3
        NetPrometheus
        NetStatsd
        PadWalker
        ParallelForkManager
        PerlCriticCommunity
        PrometheusTinyShared
        ReadonlyX
        SetScalar
        SQLSplitStatement
        Starman
        StringCompareConstantTime
        SysHostnameLong
        TermSizeAny
        TermReadKey
        Test2Harness
        TestPostgreSQL
        TextDiff
        TextTable
        UUID4Tiny
        YAML
        XMLSimple
      ]));
  };

  version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (rawSrc.lastModifiedDate or "19700101")}.${rawSrc.shortRev or "DIRTY"}";
in
stdenv.mkDerivation (finalAttrs: {
  pname = "hydra";
  inherit version;

  src = fileset.toSource {
    root = ./.;
    fileset = fileset.unions ([
      ./version.txt
      ./configure.ac
      ./Makefile.am
      ./src
      ./doc
      ./nixos-modules/hydra.nix
      # These are always needed to appease Automake
      ./t/Makefile.am
      ./t/jobs/config.nix.in
      ./t/jobs/declarative/project.json.in
    ] ++ lib.optionals finalAttrs.doCheck [
      ./t
      ./.perlcriticrc
      ./.yath.rc
    ]);
  };

  strictDeps = true;

  nativeBuildInputs = [
    makeWrapper
    autoreconfHook
    nukeReferences
    pkg-config
    mdbook
    nix
    perlDeps
    perl
    unzip
  ];

  buildInputs = [
    libpqxx
    openssl
    libxslt
    nix
    perlDeps
    perl
    boost
    nlohmann_json
    prometheus-cpp
  ];

  nativeCheckInputs = [
    bzip2
    darcs
    foreman
    top-git
    mercurial
    subversion
    breezy
    openldap
    postgresql_13
    pixz
  ];

  checkInputs = [
    cacert
    glibcLocales
    libressl.nc
    python3
  ];

  hydraPath = lib.makeBinPath (
    [
      subversion
      openssh
      nix
      coreutils
      findutils
      pixz
      gzip
      bzip2
      xz
      gnutar
      unzip
      git
      top-git
      mercurial
      darcs
      gnused
      breezy
    ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
  );

  OPENLDAP_ROOT = openldap;

  shellHook = ''
    pushd $(git rev-parse --show-toplevel) >/dev/null

    PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH
    PERL5LIB=$(pwd)/src/lib:$PERL5LIB
    export HYDRA_HOME="$(pwd)/src/"
    mkdir -p .hydra-data
    export HYDRA_DATA="$(pwd)/.hydra-data"
    export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'

    popd >/dev/null
  '';

  NIX_LDFLAGS = [ "-lpthread" ];

  enableParallelBuilding = true;

  doCheck = true;

  preCheck = ''
    patchShebangs .
    export LOGNAME=''${LOGNAME:-foo}
    # set $HOME for bzr so it can create its trace file
    export HOME=$(mktemp -d)
  '';

  postInstall = ''
    mkdir -p $out/nix-support

    for i in $out/bin/*; do
        read -n 4 chars < $i
        if [[ $chars =~ ELF ]]; then continue; fi
        wrapProgram $i \
            --prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
            --prefix PATH ':' $out/bin:$hydraPath \
            --set HYDRA_RELEASE ${version} \
            --set HYDRA_HOME $out/libexec/hydra \
            --set NIX_RELEASE ${nix.name or "unknown"}
    done
  '';

  dontStrip = true;

  meta.description = "Build of Hydra on ${stdenv.system}";
  passthru = { inherit perlDeps nix; };
})
@@ -7,6 +7,9 @@
 #include "store-api.hh"
 #include "eval.hh"
 #include "eval-inline.hh"
+#include "eval-settings.hh"
+#include "signals.hh"
+#include "terminal.hh"
 #include "util.hh"
 #include "get-drvs.hh"
 #include "globals.hh"
@@ -53,7 +56,7 @@ using namespace nix;
 static Path gcRootsDir;
 static size_t maxMemorySize;
 
-struct MyArgs : MixEvalArgs, MixCommonArgs
+struct MyArgs : MixEvalArgs, MixCommonArgs, RootArgs
 {
     Path releaseExpr;
     bool flake = false;
@@ -86,7 +89,7 @@ struct MyArgs : MixEvalArgs, MixCommonArgs
 
 static MyArgs myArgs;
 
-static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std::string & name, const std::string & subAttribute)
+static std::string queryMetaStrings(EvalState & state, PackageInfo & drv, const std::string & name, const std::string & subAttribute)
 {
     Strings res;
     std::function<void(Value & v)> rec;
@@ -94,13 +97,13 @@ static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std:
     rec = [&](Value & v) {
         state.forceValue(v, noPos);
         if (v.type() == nString)
-            res.push_back(v.string.s);
+            res.emplace_back(v.string_view());
         else if (v.isList())
             for (unsigned int n = 0; n < v.listSize(); ++n)
                 rec(*v.listElems()[n]);
         else if (v.type() == nAttrs) {
-            auto a = v.attrs->find(state.symbols.create(subAttribute));
-            if (a != v.attrs->end())
+            auto a = v.attrs()->find(state.symbols.create(subAttribute));
+            if (a != v.attrs()->end())
                 res.push_back(std::string(state.forceString(*a->value, a->pos, "while evaluating meta attributes")));
         }
     };
@@ -135,12 +138,12 @@ static void worker(
 
         callFlake(state, lockedFlake, *vFlake);
 
-        auto vOutputs = vFlake->attrs->get(state.symbols.create("outputs"))->value;
+        auto vOutputs = vFlake->attrs()->get(state.symbols.create("outputs"))->value;
         state.forceValue(*vOutputs, noPos);
 
-        auto aHydraJobs = vOutputs->attrs->get(state.symbols.create("hydraJobs"));
+        auto aHydraJobs = vOutputs->attrs()->get(state.symbols.create("hydraJobs"));
         if (!aHydraJobs)
-            aHydraJobs = vOutputs->attrs->get(state.symbols.create("checks"));
+            aHydraJobs = vOutputs->attrs()->get(state.symbols.create("checks"));
         if (!aHydraJobs)
             throw Error("flake '%s' does not provide any Hydra jobs or checks", flakeRef);
 
@@ -175,10 +178,14 @@ static void worker(
 
             if (auto drv = getDerivation(state, *v, false)) {
 
-                DrvInfo::Outputs outputs = drv->queryOutputs();
+                // CA derivations do not have static output paths, so we
+                // have to defensively not query output paths in case we
+                // encounter one.
+                PackageInfo::Outputs outputs = drv->queryOutputs(
+                    !experimentalFeatureSettings.isEnabled(Xp::CaDerivations));
 
                 if (drv->querySystem() == "unknown")
-                    throw EvalError("derivation must have a 'system' attribute");
+                    state.error<EvalError>("derivation must have a 'system' attribute").debugThrow();
 
                 auto drvPath = state.store->printStorePath(drv->requireDrvPath());
 
@@ -197,31 +204,31 @@ static void worker(
                 job["isChannel"] = drv->queryMetaBool("isHydraChannel", false);
 
                 /* If this is an aggregate, then get its constituents. */
-                auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
+                auto a = v->attrs()->get(state.symbols.create("_hydraAggregate"));
                 if (a && state.forceBool(*a->value, a->pos, "while evaluating the `_hydraAggregate` attribute")) {
-                    auto a = v->attrs->get(state.symbols.create("constituents"));
+                    auto a = v->attrs()->get(state.symbols.create("constituents"));
                     if (!a)
-                        throw EvalError("derivation must have a ‘constituents’ attribute");
+                        state.error<EvalError>("derivation must have a ‘constituents’ attribute").debugThrow();
 
                     NixStringContext context;
                     state.coerceToString(a->pos, *a->value, context, "while evaluating the `constituents` attribute", true, false);
                     for (auto & c : context)
                         std::visit(overloaded {
                             [&](const NixStringContextElem::Built & b) {
-                                job["constituents"].push_back(state.store->printStorePath(b.drvPath));
+                                job["constituents"].push_back(b.drvPath->to_string(*state.store));
                             },
                             [&](const NixStringContextElem::Opaque & o) {
                             },
                             [&](const NixStringContextElem::DrvDeep & d) {
                             },
-                        }, c.raw());
+                        }, c.raw);
 
                     state.forceList(*a->value, a->pos, "while evaluating the `constituents` attribute");
                     for (unsigned int n = 0; n < a->value->listSize(); ++n) {
                         auto v = a->value->listElems()[n];
                         state.forceValue(*v, noPos);
                         if (v->type() == nString)
-                            job["namedConstituents"].push_back(v->str());
+                            job["namedConstituents"].push_back(v->string_view());
                     }
                 }
 
@@ -236,19 +243,24 @@ static void worker(
                 }
 
                 nlohmann::json out;
-                for (auto & j : outputs)
-                    // FIXME: handle CA/impure builds.
-                    if (j.second)
-                        out[j.first] = state.store->printStorePath(*j.second);
+                for (auto & [outputName, optOutputPath] : outputs) {
+                    if (optOutputPath) {
+                        out[outputName] = state.store->printStorePath(*optOutputPath);
+                    } else {
+                        // See the `queryOutputs` call above; we should
+                        // not encounter missing output paths otherwise.
+                        assert(experimentalFeatureSettings.isEnabled(Xp::CaDerivations));
+                        out[outputName] = nullptr;
+                    }
+                }
                 job["outputs"] = std::move(out);
 
                 reply["job"] = std::move(job);
             }
 
             else if (v->type() == nAttrs) {
                 auto attrs = nlohmann::json::array();
                 StringSet ss;
-                for (auto & i : v->attrs->lexicographicOrder(state.symbols)) {
+                for (auto & i : v->attrs()->lexicographicOrder(state.symbols)) {
                     std::string name(state.symbols[i->name]);
                     if (name.find(' ') != std::string::npos) {
                         printError("skipping job with illegal name '%s'", name);
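The hunk above changes how job outputs are serialized: with the `ca-derivations` experimental feature enabled, an output may have no statically known store path, and the new loop emits JSON `null` for it instead of silently dropping the output. A standalone sketch of the same pattern (illustrative names and paths only, not Hydra code):

    // Sketch: serializing a map of optional output paths the way the new
    // worker loop does -- known paths become strings, unknown
    // (content-addressed) outputs become JSON null.
    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>
    #include <nlohmann/json.hpp>

    int main()
    {
        std::map<std::string, std::optional<std::string>> outputs = {
            {"out", "/nix/store/abc...-hello"},   // hypothetical path
            {"doc", std::nullopt},                // e.g. a CA output with no static path
        };

        nlohmann::json out;
        for (auto & [outputName, optOutputPath] : outputs) {
            if (optOutputPath)
                out[outputName] = *optOutputPath;
            else
                out[outputName] = nullptr;        // serialized as JSON null
        }

        // prints {"doc":null,"out":"/nix/store/abc...-hello"}
        std::cout << out.dump() << std::endl;
    }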
@@ -262,7 +274,7 @@ static void worker(
             else if (v->type() == nNull)
                 ;
 
-            else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v));
+            else state.error<TypeError>("attribute '%s' is %s, which is not supported", attrPath, showType(*v)).debugThrow();
 
         } catch (EvalError & e) {
             auto msg = e.msg();
@@ -356,7 +368,7 @@ int main(int argc, char * * argv)
             ]()
             {
                 try {
-                    EvalState state(myArgs.searchPath, openStore());
+                    EvalState state(myArgs.lookupPath, openStore());
                     Bindings & autoArgs = *myArgs.getAutoArgs(state);
                     worker(state, autoArgs, *to, *from);
                 } catch (Error & e) {
@@ -516,7 +528,7 @@ int main(int argc, char * * argv)
                     auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]);
                     auto drv2 = store->readDerivation(drvPath2);
                     job["constituents"].push_back(store->printStorePath(drvPath2));
-                    drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first};
+                    drv.inputDrvs.map[drvPath2].value = {drv2.outputs.begin()->first};
                 }
 
                 if (brokenJobs.empty()) {
@@ -2,6 +2,7 @@
 #include "hydra-config.hh"
 #include "pool.hh"
 #include "shared.hh"
+#include "signals.hh"
 
 #include <algorithm>
 #include <thread>
@@ -37,7 +38,7 @@ class JobsetId {
     friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);
 
     std::string display() const {
-        return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id);
+        return boost::str(boost::format("%1%:%2% (jobset#%3%)") % project % jobset % id);
     }
 };
 bool operator==(const JobsetId & lhs, const JobsetId & rhs)
@@ -8,27 +8,20 @@
 #include "build-result.hh"
 #include "path.hh"
 #include "serve-protocol.hh"
+#include "serve-protocol-impl.hh"
 #include "state.hh"
+#include "current-process.hh"
+#include "processes.hh"
 #include "util.hh"
-#include "worker-protocol.hh"
-#include "worker-protocol-impl.hh"
+#include "serve-protocol.hh"
+#include "serve-protocol-impl.hh"
+#include "ssh.hh"
 #include "finally.hh"
 #include "url.hh"
 
 using namespace nix;
 
+namespace nix::build_remote {
 
-struct Child
-{
-    Pid pid;
-    AutoCloseFD to, from;
-};
-
-
-static void append(Strings & dst, const Strings & src)
-{
-    dst.insert(dst.end(), src.begin(), src.end());
-}
-
 
 static Strings extraStoreArgs(std::string & machine)
 {
@@ -50,82 +43,52 @@ static Strings extraStoreArgs(std::string & machine)
     return result;
 }
 
-static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Child & child)
+static std::unique_ptr<SSHMaster::Connection> openConnection(
+    ::Machine::ptr machine, SSHMaster & master)
 {
-    std::string pgmName;
-    Pipe to, from;
-    to.create();
-    from.create();
-
-    Strings argv;
+    Strings command = {"nix-store", "--serve", "--write"};
     if (machine->isLocalhost()) {
-        pgmName = "nix-store";
-        argv = {"nix-store", "--builders", "", "--serve", "--write"};
+        command.push_back("--builders");
+        command.push_back("");
    } else {
-        pgmName = "ssh";
-        auto sshName = machine->sshName;
-        Strings extraArgs = extraStoreArgs(sshName);
-        argv = {"ssh", sshName};
-        if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
-        if (machine->sshPublicHostKey != "") {
-            Path fileName = tmpDir + "/host-key";
-            auto p = machine->sshName.find("@");
-            std::string host = p != std::string::npos ? std::string(machine->sshName, p + 1) : machine->sshName;
-            writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
-            append(argv, {"-oUserKnownHostsFile=" + fileName});
-        }
-        append(argv,
-            { "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
-            , "--", "nix-store", "--serve", "--write" });
-        append(argv, extraArgs);
+        command.splice(command.end(), extraStoreArgs(machine->sshName));
    }
 
-    child.pid = startProcess([&]() {
-        restoreProcessContext();
-
-        if (dup2(to.readSide.get(), STDIN_FILENO) == -1)
-            throw SysError("cannot dup input pipe to stdin");
-
-        if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1)
-            throw SysError("cannot dup output pipe to stdout");
-
-        if (dup2(stderrFD, STDERR_FILENO) == -1)
-            throw SysError("cannot dup stderr");
-
-        execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast
-
-        throw SysError("cannot start %s", pgmName);
+    auto ret = master.startCommand(std::move(command), {
+        "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
    });
 
-    to.readSide = -1;
-    from.writeSide = -1;
-
-    child.to = to.writeSide.release();
-    child.from = from.readSide.release();
+    // XXX: determine the actual max value we can use from /proc.
+
+    // FIXME: Should this be upstreamed into `startCommand` in Nix?
+    int pipesize = 1024 * 1024;
+
+    fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize);
+    fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize);
+
+    return ret;
 }
 
-static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
-    FdSource & from, FdSink & to, const StorePathSet & paths,
-    bool useSubstitutes = false)
+static void copyClosureTo(
+    ::Machine::Connection & conn,
+    Store & destStore,
+    const StorePathSet & paths,
+    SubstituteFlag useSubstitutes = NoSubstitute)
 {
     StorePathSet closure;
     destStore.computeFSClosure(paths, closure);
 
-    WorkerProto::WriteConn wconn { .to = to };
-    WorkerProto::ReadConn rconn { .from = from };
     /* Send the "query valid paths" command with the "lock" option
        enabled. This prevents a race where the remote host
        garbage-collect paths that are already there. Optionally, ask
        the remote host to substitute missing paths. */
     // FIXME: substitute output pollutes our build log
-    to << ServeProto::Command::QueryValidPaths << 1 << useSubstitutes;
-    WorkerProto::write(destStore, wconn, closure);
-    to.flush();
 
     /* Get back the set of paths that are already valid on the remote
        host. */
-    auto present = WorkerProto::Serialise<StorePathSet>::read(destStore, rconn);
+    auto present = conn.queryValidPaths(
        destStore, true, closure, useSubstitutes);
 
     if (present.size() == closure.size()) return;
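The new `openConnection` above widens the SSH pipes to 1 MiB via `F_SETPIPE_SZ`, leaving an XXX about querying the real ceiling. A hedged, Linux-only sketch of one way to resolve that note: read `/proc/sys/fs/pipe-max-size` and clamp before the call (names here are illustrative; also note `F_SETPIPE_SZ` takes the size by value, and a failed call can simply be tolerated, keeping the kernel default):

    #include <fcntl.h>
    #include <cstdio>

    static long maxPipeSize()
    {
        long max = 1024 * 1024; // fall back to the 1 MiB used above
        if (std::FILE * f = std::fopen("/proc/sys/fs/pipe-max-size", "r")) {
            long v;
            if (std::fscanf(f, "%ld", &v) == 1) max = v;
            std::fclose(f);
        }
        return max;
    }

    static void growPipe(int fd, long want)
    {
        long cap = maxPipeSize();
        if (want > cap) want = cap;
        if (fcntl(fd, F_SETPIPE_SZ, (int) want) == -1)
            std::perror("F_SETPIPE_SZ"); // non-fatal: keep the default size
    }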
@@ -137,20 +100,20 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
 
     printMsg(lvlDebug, "sending %d missing paths", missing.size());
 
-    std::unique_lock<std::timed_mutex> sendLock(sendMutex,
+    std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
         std::chrono::seconds(600));
 
-    to << ServeProto::Command::ImportPaths;
-    destStore.exportPaths(missing, to);
-    to.flush();
+    conn.to << ServeProto::Command::ImportPaths;
+    destStore.exportPaths(missing, conn.to);
+    conn.to.flush();
 
-    if (readInt(from) != 1)
+    if (readInt(conn.from) != 1)
         throw Error("remote machine failed to import closure");
 }
 
 
 // FIXME: use Store::topoSortPaths().
-StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths)
+static StorePaths reverseTopoSortPaths(const std::map<StorePath, UnkeyedValidPathInfo> & paths)
 {
     StorePaths sorted;
     StorePathSet visited;
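`reverseTopoSortPaths` (made `static` above, and still carrying the FIXME to reuse `Store::topoSortPaths()`) is a post-order depth-first search: a path is emitted only after everything it references, so the copy loop later inserts dependencies before dependents. A generic sketch of that traversal, with illustrative types rather than Hydra's:

    #include <functional>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    using Node = std::string;

    // refs maps each node to the nodes it references (its dependencies).
    std::vector<Node> reverseTopoSort(const std::map<Node, std::set<Node>> & refs)
    {
        std::vector<Node> sorted;
        std::set<Node> visited;

        std::function<void(const Node &)> visit = [&](const Node & n) {
            if (!visited.insert(n).second) return;      // already emitted
            auto it = refs.find(n);
            if (it != refs.end())
                for (auto & dep : it->second)
                    if (dep != n && refs.count(dep))    // skip self and unknown refs
                        visit(dep);
            sorted.push_back(n);                        // post-order: deps come first
        };

        for (auto & [n, _] : refs)
            visit(n);

        return sorted;
    }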
@@ -178,40 +141,311 @@ StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths
     return sorted;
 }
 
+static std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
+{
+    std::string base(drvPath.to_string());
+    auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
+
+    createDirs(dirOf(logFile));
+
+    AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
+    if (!logFD) throw SysError("creating log file ‘%s’", logFile);
+
+    return {std::move(logFile), std::move(logFD)};
+}
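The new `openLogFile` helper shards build logs by splitting the derivation basename after its first two characters, so logs fan out across many subdirectories instead of one huge flat listing. A trivial sketch of the same path computation (hypothetical helper name and example path):

    #include <iostream>
    #include <string>

    // e.g. "q3vf...-hello-2.12.drv" -> "<logDir>/q3/vf...-hello-2.12.drv"
    std::string logPathFor(const std::string & logDir, const std::string & drvBase)
    {
        return logDir + "/" + drvBase.substr(0, 2) + "/" + drvBase.substr(2);
    }

    int main()
    {
        std::cout << logPathFor("/var/lib/hydra/build-logs",
                                "q3vf5sh6jz9dhhphl3xdp87zr7lvifpv-hello-2.12.drv")
                  << std::endl;
    }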
+static BasicDerivation sendInputs(
+    State & state,
+    Step & step,
+    Store & localStore,
+    Store & destStore,
+    ::Machine::Connection & conn,
+    unsigned int & overhead,
+    counter & nrStepsWaiting,
+    counter & nrStepsCopyingTo
+)
+{
+    /* Replace the input derivations by their output paths to send a
+       minimal closure to the builder.
+
+       `tryResolve` currently does *not* rewrite input addresses, so it
+       is safe to do this in all cases. (It should probably have a mode
+       to do that, however, but we would not use it here.)
+    */
+    BasicDerivation basicDrv = ({
+        auto maybeBasicDrv = step.drv->tryResolve(destStore, &localStore);
+        if (!maybeBasicDrv)
+            throw Error(
+                "the derivation '%s' can’t be resolved. It’s probably "
+                "missing some outputs",
+                localStore.printStorePath(step.drvPath));
+        *maybeBasicDrv;
+    });
+
+    /* Ensure that the inputs exist in the destination store. This is
+       a no-op for regular stores, but for the binary cache store,
+       this will copy the inputs to the binary cache from the local
+       store. */
+    if (&localStore != &destStore) {
+        copyClosure(localStore, destStore,
+            step.drv->inputSrcs,
+            NoRepair, NoCheckSigs, NoSubstitute);
+    }
+
+    {
+        auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
+        mc1.reset();
+        MaintainCount<counter> mc2(nrStepsCopyingTo);
+
+        printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
+            localStore.printStorePath(step.drvPath), conn.machine->sshName);
+
+        auto now1 = std::chrono::steady_clock::now();
+
+        /* Copy the input closure. */
+        if (conn.machine->isLocalhost()) {
+            StorePathSet closure;
+            destStore.computeFSClosure(basicDrv.inputSrcs, closure);
+            copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
+        } else {
+            copyClosureTo(conn, destStore, basicDrv.inputSrcs, Substitute);
+        }
+
+        auto now2 = std::chrono::steady_clock::now();
+
+        overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+    }
+
+    return basicDrv;
+}
+
+static BuildResult performBuild(
+    ::Machine::Connection & conn,
+    Store & localStore,
+    StorePath drvPath,
+    const BasicDerivation & drv,
+    const ServeProto::BuildOptions & options,
+    counter & nrStepsBuilding
+)
+{
+    conn.putBuildDerivationRequest(localStore, drvPath, drv, options);
+
+    BuildResult result;
+
+    time_t startTime, stopTime;
+
+    startTime = time(0);
+    {
+        MaintainCount<counter> mc(nrStepsBuilding);
+        result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
+    }
+    stopTime = time(0);
+
+    if (!result.startTime) {
+        // If the builder gave `startTime = 0`, use our measurements
+        // instead of the builder's.
+        //
+        // Note: this represents the duration of a single round, rather
+        // than all rounds.
+        result.startTime = startTime;
+        result.stopTime = stopTime;
+    }
+
+    // If the protocol was too old to give us `builtOutputs`, initialize
+    // it manually by introspecting the derivation.
+    if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
+    {
+        // If the remote is too old to handle CA derivations, we can’t get this
+        // far anyways
+        assert(drv.type().hasKnownOutputPaths());
+        DerivationOutputsAndOptPaths drvOutputs = drv.outputsAndOptPaths(localStore);
+        // Since this a `BasicDerivation`, `staticOutputHashes` will not
+        // do any real work.
+        auto outputHashes = staticOutputHashes(localStore, drv);
+        for (auto & [outputName, output] : drvOutputs) {
+            auto outputPath = output.second;
+            // We’ve just asserted that the output paths of the derivation
+            // were known
+            assert(outputPath);
+            auto outputHash = outputHashes.at(outputName);
+            auto drvOutput = DrvOutput { outputHash, outputName };
+            result.builtOutputs.insert_or_assign(
+                std::move(outputName),
+                Realisation { drvOutput, *outputPath });
+        }
+    }
+
+    return result;
+}
+
+static std::map<StorePath, UnkeyedValidPathInfo> queryPathInfos(
+    ::Machine::Connection & conn,
+    Store & localStore,
+    StorePathSet & outputs,
+    size_t & totalNarSize
+)
+{
+    /* Get info about each output path. */
+    std::map<StorePath, UnkeyedValidPathInfo> infos;
+    conn.to << ServeProto::Command::QueryPathInfos;
+    ServeProto::write(localStore, conn, outputs);
+    conn.to.flush();
+    while (true) {
+        auto storePathS = readString(conn.from);
+        if (storePathS == "") break;
+
+        auto storePath = localStore.parseStorePath(storePathS);
+        auto info = ServeProto::Serialise<UnkeyedValidPathInfo>::read(localStore, conn);
+        totalNarSize += info.narSize;
+        infos.insert_or_assign(std::move(storePath), std::move(info));
+    }
+
+    return infos;
+}
+
+static void copyPathFromRemote(
+    ::Machine::Connection & conn,
+    NarMemberDatas & narMembers,
+    Store & localStore,
+    Store & destStore,
+    const ValidPathInfo & info
+)
+{
+    /* Receive the NAR from the remote and add it to the
+       destination store. Meanwhile, extract all the info from the
+       NAR that getBuildOutput() needs. */
+    auto source2 = sinkToSource([&](Sink & sink)
+    {
+        /* Note: we should only send the command to dump the store
+           path to the remote if the NAR is actually going to get read
+           by the destination store, which won't happen if this path
+           is already valid on the destination store. Since this
+           lambda function only gets executed if someone tries to read
+           from source2, we will send the command from here rather
+           than outside the lambda. */
+        conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
+        conn.to.flush();
+
+        TeeSource tee(conn.from, sink);
+        extractNarData(tee, localStore.printStorePath(info.path), narMembers);
+    });
+
+    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
+}
+
+static void copyPathsFromRemote(
+    ::Machine::Connection & conn,
+    NarMemberDatas & narMembers,
+    Store & localStore,
+    Store & destStore,
+    const std::map<StorePath, UnkeyedValidPathInfo> & infos
+)
+{
+    auto pathsSorted = reverseTopoSortPaths(infos);
+
+    for (auto & path : pathsSorted) {
+        auto & info = infos.find(path)->second;
+        copyPathFromRemote(
+            conn, narMembers, localStore, destStore,
+            ValidPathInfo { path, info });
+    }
+
+}
+
+}
+
+/* using namespace nix::build_remote; */
+
+void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
+{
+    startTime = buildResult.startTime;
+    stopTime = buildResult.stopTime;
+    timesBuilt = buildResult.timesBuilt;
+    errorMsg = buildResult.errorMsg;
+    isNonDeterministic = buildResult.isNonDeterministic;
+
+    switch ((BuildResult::Status) buildResult.status) {
+        case BuildResult::Built:
+            stepStatus = bsSuccess;
+            break;
+        case BuildResult::Substituted:
+        case BuildResult::AlreadyValid:
+            stepStatus = bsSuccess;
+            isCached = true;
+            break;
+        case BuildResult::PermanentFailure:
+            stepStatus = bsFailed;
+            canCache = true;
+            errorMsg = "";
+            break;
+        case BuildResult::InputRejected:
+        case BuildResult::OutputRejected:
+            stepStatus = bsFailed;
+            canCache = true;
+            break;
+        case BuildResult::TransientFailure:
+            stepStatus = bsFailed;
+            canRetry = true;
+            errorMsg = "";
+            break;
+        case BuildResult::TimedOut:
+            stepStatus = bsTimedOut;
+            errorMsg = "";
+            break;
+        case BuildResult::MiscFailure:
+            stepStatus = bsAborted;
+            canRetry = true;
+            break;
+        case BuildResult::LogLimitExceeded:
+            stepStatus = bsLogLimitExceeded;
+            break;
+        case BuildResult::NotDeterministic:
+            stepStatus = bsNotDeterministic;
+            canRetry = false;
+            canCache = true;
+            break;
+        default:
+            stepStatus = bsAborted;
+            break;
+    }
+
+}
+
 
 void State::buildRemote(ref<Store> destStore,
-    Machine::ptr machine, Step::ptr step,
-    unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats,
+    ::Machine::ptr machine, Step::ptr step,
+    const ServeProto::BuildOptions & buildOptions,
     RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
     std::function<void(StepState)> updateStep,
     NarMemberDatas & narMembers)
 {
     assert(BuildResult::TimedOut == 8);
 
-    std::string base(step->drvPath.to_string());
-    result.logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
-    AutoDelete autoDelete(result.logFile, false);
-
-    createDirs(dirOf(result.logFile));
-
-    AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
-    if (!logFD) throw SysError("creating log file ‘%s’", result.logFile);
-
-    nix::Path tmpDir = createTempDir();
-    AutoDelete tmpDirDel(tmpDir, true);
+    auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath);
+    AutoDelete logFileDel(logFile, false);
+    result.logFile = logFile;
 
     try {
 
         updateStep(ssConnecting);
 
+        SSHMaster master {
+            machine->sshName,
+            machine->sshKey,
+            machine->sshPublicHostKey,
+            false, // no SSH master yet
+            false, // no compression yet
+            logFD.get(),
+        };
+
         // FIXME: rewrite to use Store.
-        Child child;
-        openConnection(machine, tmpDir, logFD.get(), child);
+        auto child = build_remote::openConnection(machine, master);
 
         {
             auto activeStepState(activeStep->state_.lock());
             if (activeStepState->cancelled) throw Error("step cancelled");
-            activeStepState->pid = child.pid;
+            activeStepState->pid = child->sshPid;
         }
 
         Finally clearPid([&]() {
@@ -226,38 +460,41 @@ void State::buildRemote(ref<Store> destStore,
            process. Meh. */
         });
 
-        FdSource from(child.from.get());
-        WorkerProto::ReadConn rconn { .from = from };
-        FdSink to(child.to.get());
-        WorkerProto::WriteConn wconn { .to = to };
+        ::Machine::Connection conn {
+            {
+                .to = child->in.get(),
+                .from = child->out.get(),
+                /* Handshake. */
+                .remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
+            },
+            /*.machine =*/ machine,
+        };
 
         Finally updateStats([&]() {
-            bytesReceived += from.read;
-            bytesSent += to.written;
+            bytesReceived += conn.from.read;
+            bytesSent += conn.to.written;
         });
 
-        /* Handshake. */
-        unsigned int remoteVersion;
+        constexpr ServeProto::Version our_version = 0x206;
 
         try {
-            to << SERVE_MAGIC_1 << 0x206;
-            to.flush();
-
-            unsigned int magic = readInt(from);
-            if (magic != SERVE_MAGIC_2)
-                throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%1%’", machine->sshName);
-            remoteVersion = readInt(from);
-            if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
-                throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%1%’", machine->sshName);
-            if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0)
-                throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName);
-
+            conn.remoteVersion = decltype(conn)::handshake(
+                conn.to,
+                conn.from,
+                our_version,
+                machine->sshName);
         } catch (EndOfFile & e) {
-            child.pid.wait();
+            child->sshPid.wait();
             std::string s = chomp(readFile(result.logFile));
             throw Error("cannot connect to ‘%1%’: %2%", machine->sshName, s);
         }
 
+        // Do not attempt to speak a newer version of the protocol.
+        //
+        // Per https://github.com/NixOS/nix/issues/9584 should be handled as
+        // part of `handshake` in upstream nix.
+        conn.remoteVersion = std::min(conn.remoteVersion, our_version);
+
         {
             auto info(machine->state->connectInfo.lock());
             info->consecutiveFailures = 0;
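The rewritten handshake above delegates the magic/version exchange to the serve-protocol helper and then clamps the negotiated version with `std::min`, so Hydra never speaks a minor version newer than the one it was built against (the linked nix#9584 suggests folding this into `handshake` upstream). A minimal sketch of that negotiation pattern, with illustrative helpers:

    #include <algorithm>
    #include <cstdio>

    using Version = unsigned int;

    // 0x206 encodes major 0x200, minor 6, matching the hunk above.
    constexpr unsigned minorOf(Version v) { return v & 0x00ff; }

    Version negotiate(Version ours, Version theirs)
    {
        // Each side announces its own version; the effective protocol is
        // the minimum, so neither peer must parse messages newer than itself.
        return std::min(ours, theirs);
    }

    int main()
    {
        constexpr Version our_version = 0x206;
        Version remote = 0x207; // a hypothetical newer peer
        Version effective = negotiate(our_version, remote);
        std::printf("speaking minor version %u\n", minorOf(effective));
    }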
@@ -269,62 +506,12 @@ void State::buildRemote(ref<Store> destStore,
|
|||||||
copy the immediate sources of the derivation and the required
|
copy the immediate sources of the derivation and the required
|
||||||
outputs of the input derivations. */
|
outputs of the input derivations. */
|
||||||
updateStep(ssSendingInputs);
|
updateStep(ssSendingInputs);
|
||||||
|
BasicDerivation resolvedDrv = build_remote::sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo);
|
||||||
|
|
||||||
StorePathSet inputs;
|
logFileDel.cancel();
|
||||||
BasicDerivation basicDrv(*step->drv);
|
|
||||||
|
|
||||||
for (auto & p : step->drv->inputSrcs)
|
|
||||||
inputs.insert(p);
|
|
||||||
|
|
||||||
for (auto & input : step->drv->inputDrvs) {
|
|
||||||
auto drv2 = localStore->readDerivation(input.first);
|
|
||||||
for (auto & name : input.second) {
|
|
||||||
if (auto i = get(drv2.outputs, name)) {
|
|
||||||
auto outPath = i->path(*localStore, drv2.name, name);
|
|
||||||
inputs.insert(*outPath);
|
|
||||||
basicDrv.inputSrcs.insert(*outPath);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Ensure that the inputs exist in the destination store. This is
|
|
||||||
a no-op for regular stores, but for the binary cache store,
|
|
||||||
this will copy the inputs to the binary cache from the local
|
|
||||||
store. */
|
|
||||||
if (localStore != std::shared_ptr<Store>(destStore)) {
|
|
||||||
copyClosure(*localStore, *destStore,
|
|
||||||
step->drv->inputSrcs,
|
|
||||||
NoRepair, NoCheckSigs, NoSubstitute);
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
|
|
||||||
mc1.reset();
|
|
||||||
MaintainCount<counter> mc2(nrStepsCopyingTo);
|
|
||||||
|
|
||||||
printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
|
|
||||||
localStore->printStorePath(step->drvPath), machine->sshName);
|
|
||||||
|
|
||||||
auto now1 = std::chrono::steady_clock::now();
|
|
||||||
|
|
||||||
/* Copy the input closure. */
|
|
||||||
if (machine->isLocalhost()) {
|
|
||||||
StorePathSet closure;
|
|
||||||
destStore->computeFSClosure(inputs, closure);
|
|
||||||
copyPaths(*destStore, *localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
|
|
||||||
} else {
|
|
||||||
copyClosureTo(machine->state->sendLock, *destStore, from, to, inputs, true);
|
|
||||||
}
|
|
||||||
|
|
||||||
auto now2 = std::chrono::steady_clock::now();
|
|
||||||
|
|
||||||
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
|
||||||
}
|
|
||||||
|
|
||||||
autoDelete.cancel();
|
|
||||||
|
|
||||||
/* Truncate the log to get rid of messages about substitutions
|
/* Truncate the log to get rid of messages about substitutions
|
||||||
etc. on the remote system. */
|
etc. on the remote system. */
|
||||||
if (lseek(logFD.get(), SEEK_SET, 0) != 0)
|
if (lseek(logFD.get(), SEEK_SET, 0) != 0)
|
||||||
throw SysError("seeking to the start of log file ‘%s’", result.logFile);
|
throw SysError("seeking to the start of log file ‘%s’", result.logFile);
|
||||||
|
|
||||||
@@ -340,85 +527,17 @@ void State::buildRemote(ref<Store> destStore,
|
|||||||
|
|
||||||
updateStep(ssBuilding);
|
updateStep(ssBuilding);
|
||||||
|
|
||||||
to << ServeProto::Command::BuildDerivation << localStore->printStorePath(step->drvPath);
|
BuildResult buildResult = build_remote::performBuild(
|
||||||
writeDerivation(to, *localStore, basicDrv);
|
conn,
|
||||||
to << maxSilentTime << buildTimeout;
|
*localStore,
|
||||||
if (GET_PROTOCOL_MINOR(remoteVersion) >= 2)
|
step->drvPath,
|
||||||
to << maxLogSize;
|
resolvedDrv,
|
||||||
if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
|
buildOptions,
|
||||||
to << repeats // == build-repeat
|
nrStepsBuilding
|
||||||
<< step->isDeterministic; // == enforce-determinism
|
);
|
||||||
}
|
|
||||||
to.flush();
|
|
||||||
|
|
||||||
result.startTime = time(0);
|
result.updateWithBuildResult(buildResult);
|
||||||
int res;
|
|
||||||
{
|
|
||||||
MaintainCount<counter> mc(nrStepsBuilding);
|
|
||||||
res = readInt(from);
|
|
||||||
}
|
|
||||||
result.stopTime = time(0);
|
|
||||||
|
|
||||||
result.errorMsg = readString(from);
|
|
||||||
if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
|
|
||||||
result.timesBuilt = readInt(from);
|
|
||||||
result.isNonDeterministic = readInt(from);
|
|
||||||
auto start = readInt(from);
|
|
||||||
auto stop = readInt(from);
|
|
||||||
if (start && start) {
|
|
||||||
/* Note: this represents the duration of a single
|
|
||||||
round, rather than all rounds. */
|
|
||||||
result.startTime = start;
|
|
||||||
result.stopTime = stop;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) {
|
|
||||||
WorkerProto::Serialise<DrvOutputs>::read(*localStore, rconn);
|
|
||||||
}
|
|
||||||
switch ((BuildResult::Status) res) {
|
|
||||||
case BuildResult::Built:
|
|
||||||
result.stepStatus = bsSuccess;
|
|
||||||
break;
|
|
||||||
case BuildResult::Substituted:
|
|
||||||
case BuildResult::AlreadyValid:
|
|
||||||
result.stepStatus = bsSuccess;
|
|
||||||
result.isCached = true;
|
|
||||||
break;
|
|
||||||
case BuildResult::PermanentFailure:
|
|
||||||
result.stepStatus = bsFailed;
|
|
||||||
result.canCache = true;
|
|
||||||
result.errorMsg = "";
|
|
||||||
break;
|
|
||||||
case BuildResult::InputRejected:
|
|
||||||
case BuildResult::OutputRejected:
|
|
||||||
result.stepStatus = bsFailed;
|
|
||||||
result.canCache = true;
|
|
||||||
break;
|
|
||||||
case BuildResult::TransientFailure:
|
|
||||||
result.stepStatus = bsFailed;
|
|
||||||
result.canRetry = true;
|
|
||||||
result.errorMsg = "";
|
|
||||||
break;
|
|
||||||
case BuildResult::TimedOut:
|
|
||||||
result.stepStatus = bsTimedOut;
|
|
||||||
result.errorMsg = "";
|
|
||||||
break;
|
|
||||||
case BuildResult::MiscFailure:
|
|
||||||
result.stepStatus = bsAborted;
|
|
||||||
result.canRetry = true;
|
|
||||||
break;
|
|
||||||
case BuildResult::LogLimitExceeded:
|
|
||||||
result.stepStatus = bsLogLimitExceeded;
|
|
||||||
break;
|
|
||||||
case BuildResult::NotDeterministic:
|
|
||||||
result.stepStatus = bsNotDeterministic;
|
|
||||||
result.canRetry = false;
|
|
||||||
result.canCache = true;
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
result.stepStatus = bsAborted;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (result.stepStatus != bsSuccess) return;
|
if (result.stepStatus != bsSuccess) return;
|
||||||
|
|
||||||
result.errorMsg = "";
|
result.errorMsg = "";
|
||||||
@@ -432,6 +551,10 @@ void State::buildRemote(ref<Store> destStore,
|
|||||||
result.logFile = "";
|
result.logFile = "";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
StorePathSet outputs;
|
||||||
|
for (auto & [_, realisation] : buildResult.builtOutputs)
|
||||||
|
outputs.insert(realisation.outPath);
|
||||||
|
|
||||||
/* Copy the output paths. */
|
/* Copy the output paths. */
|
||||||
if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
|
if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
|
||||||
updateStep(ssReceivingOutputs);
|
updateStep(ssReceivingOutputs);
|
||||||
@@ -440,39 +563,8 @@ void State::buildRemote(ref<Store> destStore,
|
|||||||
|
|
||||||
auto now1 = std::chrono::steady_clock::now();
|
auto now1 = std::chrono::steady_clock::now();
|
||||||
|
|
||||||
StorePathSet outputs;
|
|
||||||
for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
|
|
||||||
if (i.second.second)
|
|
||||||
outputs.insert(*i.second.second);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Get info about each output path. */
|
|
||||||
std::map<StorePath, ValidPathInfo> infos;
|
|
||||||
size_t totalNarSize = 0;
|
size_t totalNarSize = 0;
|
||||||
to << ServeProto::Command::QueryPathInfos;
|
auto infos = build_remote::queryPathInfos(conn, *localStore, outputs, totalNarSize);
|
||||||
WorkerProto::write(*localStore, wconn, outputs);
|
|
||||||
to.flush();
|
|
||||||
while (true) {
|
|
||||||
auto storePathS = readString(from);
|
|
||||||
if (storePathS == "") break;
|
|
||||||
auto deriver = readString(from); // deriver
|
|
||||||
auto references = WorkerProto::Serialise<StorePathSet>::read(*localStore, rconn);
|
|
||||||
readLongLong(from); // download size
|
|
||||||
auto narSize = readLongLong(from);
|
|
||||||
auto narHash = Hash::parseAny(readString(from), htSHA256);
|
|
||||||
auto ca = ContentAddress::parseOpt(readString(from));
|
|
||||||
readStrings<StringSet>(from); // sigs
|
|
||||||
ValidPathInfo info(localStore->parseStorePath(storePathS), narHash);
|
|
||||||
assert(outputs.count(info.path));
|
|
||||||
info.references = references;
|
|
||||||
info.narSize = narSize;
|
|
||||||
totalNarSize += info.narSize;
|
|
||||||
info.narHash = narHash;
|
|
||||||
info.ca = ca;
|
|
||||||
if (deriver != "")
|
|
||||||
info.deriver = localStore->parseStorePath(deriver);
|
|
||||||
infos.insert_or_assign(info.path, info);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (totalNarSize > maxOutputSize) {
|
if (totalNarSize > maxOutputSize) {
|
||||||
result.stepStatus = bsNarSizeLimitExceeded;
|
result.stepStatus = bsNarSizeLimitExceeded;
|
||||||
@@ -483,41 +575,30 @@ void State::buildRemote(ref<Store> destStore,
|
|||||||
printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)",
|
printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)",
|
||||||
localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);
|
localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);
|
||||||
|
|
||||||
auto pathsSorted = reverseTopoSortPaths(infos);
|
build_remote::copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos);
|
||||||
|
|
||||||
for (auto & path : pathsSorted) {
|
|
||||||
auto & info = infos.find(path)->second;
|
|
||||||
|
|
||||||
/* Receive the NAR from the remote and add it to the
|
|
||||||
destination store. Meanwhile, extract all the info from the
|
|
||||||
NAR that getBuildOutput() needs. */
|
|
||||||
auto source2 = sinkToSource([&](Sink & sink)
|
|
||||||
{
|
|
||||||
/* Note: we should only send the command to dump the store
|
|
||||||
path to the remote if the NAR is actually going to get read
|
|
||||||
by the destination store, which won't happen if this path
|
|
||||||
is already valid on the destination store. Since this
|
|
||||||
lambda function only gets executed if someone tries to read
|
|
||||||
from source2, we will send the command from here rather
|
|
||||||
than outside the lambda. */
|
|
||||||
to << ServeProto::Command::DumpStorePath << localStore->printStorePath(path);
|
|
||||||
to.flush();
|
|
||||||
|
|
||||||
TeeSource tee(from, sink);
|
|
||||||
extractNarData(tee, localStore->printStorePath(path), narMembers);
|
|
||||||
});
|
|
||||||
|
|
||||||
destStore->addToStore(info, *source2, NoRepair, NoCheckSigs);
|
|
||||||
}
|
|
||||||
|
|
||||||
auto now2 = std::chrono::steady_clock::now();
|
auto now2 = std::chrono::steady_clock::now();
|
||||||
|
|
||||||
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Register the outputs of the newly built drv */
|
||||||
|
if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
|
||||||
|
auto outputHashes = staticOutputHashes(*localStore, *step->drv);
|
||||||
|
for (auto & [outputName, realisation] : buildResult.builtOutputs) {
|
||||||
|
// Register the resolved drv output
|
||||||
|
destStore->registerDrvOutput(realisation);
|
||||||
|
|
||||||
|
// Also register the unresolved one
|
||||||
|
auto unresolvedRealisation = realisation;
|
||||||
|
unresolvedRealisation.signatures.clear();
|
||||||
|
unresolvedRealisation.id.drvHash = outputHashes.at(outputName);
|
||||||
|
destStore->registerDrvOutput(unresolvedRealisation);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/* Shut down the connection. */
|
/* Shut down the connection. */
|
||||||
child.to = -1;
|
child->in = -1;
|
||||||
child.pid.wait();
|
child->sshPid.wait();
|
||||||
|
|
||||||
} catch (Error & e) {
|
} catch (Error & e) {
|
||||||
/* Disable this machine until a certain period of time has
|
/* Disable this machine until a certain period of time has
|
||||||
|
```diff
@@ -1,7 +1,7 @@
 #include "hydra-build-result.hh"
 #include "store-api.hh"
 #include "util.hh"
-#include "fs-accessor.hh"
+#include "source-accessor.hh"
 
 #include <regex>
 
@@ -11,18 +11,18 @@ using namespace nix;
 BuildOutput getBuildOutput(
     nix::ref<Store> store,
     NarMemberDatas & narMembers,
-    const Derivation & drv)
+    const OutputPathMap derivationOutputs)
 {
     BuildOutput res;
 
     /* Compute the closure size. */
     StorePathSet outputs;
     StorePathSet closure;
-    for (auto & i : drv.outputsAndOptPaths(*store))
-        if (i.second.second) {
-            store->computeFSClosure(*i.second.second, closure);
-            outputs.insert(*i.second.second);
-        }
+    for (auto& [outputName, outputPath] : derivationOutputs) {
+        store->computeFSClosure(outputPath, closure);
+        outputs.insert(outputPath);
+        res.outputs.insert({outputName, outputPath});
+    }
     for (auto & path : closure) {
         auto info = store->queryPathInfo(path);
         res.closureSize += info->narSize;
@@ -63,7 +63,7 @@ BuildOutput getBuildOutput(
 
         auto productsFile = narMembers.find(outputS + "/nix-support/hydra-build-products");
         if (productsFile == narMembers.end() ||
-            productsFile->second.type != FSAccessor::Type::tRegular)
+            productsFile->second.type != SourceAccessor::Type::tRegular)
             continue;
         assert(productsFile->second.contents);
 
@@ -94,7 +94,7 @@ BuildOutput getBuildOutput(
 
             product.name = product.path == store->printStorePath(output) ? "" : baseNameOf(product.path);
 
-            if (file->second.type == FSAccessor::Type::tRegular) {
+            if (file->second.type == SourceAccessor::Type::tRegular) {
                 product.isRegular = true;
                 product.fileSize = file->second.fileSize.value();
                 product.sha256hash = file->second.sha256.value();
@@ -107,17 +107,16 @@ BuildOutput getBuildOutput(
     /* If no build products were explicitly declared, then add all
        outputs as a product of type "nix-build". */
     if (!explicitProducts) {
-        for (auto & [name, output] : drv.outputs) {
+        for (auto & [name, output] : derivationOutputs) {
             BuildProduct product;
-            auto outPath = output.path(*store, drv.name, name);
-            product.path = store->printStorePath(*outPath);
+            product.path = store->printStorePath(output);
             product.type = "nix-build";
             product.subtype = name == "out" ? "" : name;
-            product.name = outPath->name();
+            product.name = output.name();
 
             auto file = narMembers.find(product.path);
             assert(file != narMembers.end());
-            if (file->second.type == FSAccessor::Type::tDirectory)
+            if (file->second.type == SourceAccessor::Type::tDirectory)
                 res.products.push_back(product);
         }
     }
@@ -126,7 +125,7 @@ BuildOutput getBuildOutput(
     for (auto & output : outputs) {
         auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-release-name");
         if (file == narMembers.end() ||
-            file->second.type != FSAccessor::Type::tRegular)
+            file->second.type != SourceAccessor::Type::tRegular)
             continue;
         res.releaseName = trim(file->second.contents.value());
         // FIXME: validate release name
@@ -136,7 +135,7 @@ BuildOutput getBuildOutput(
     for (auto & output : outputs) {
         auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-metrics");
         if (file == narMembers.end() ||
-            file->second.type != FSAccessor::Type::tRegular)
+            file->second.type != SourceAccessor::Type::tRegular)
             continue;
         for (auto & line : tokenizeString<Strings>(file->second.contents.value(), "\n")) {
             auto fields = tokenizeString<std::vector<std::string>>(line);
```
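Under the new signature, callers resolve output paths up front and hand `getBuildOutput()` a ready-made map instead of a `Derivation`. A sketch of a call site, assuming nix's `OutputPathMap` (a `std::map<std::string, StorePath>`) and the two-store setup used throughout this compare:

```cpp
#include "hydra-build-result.hh"
#include "store-api.hh"

using namespace nix;

// Hypothetical wrapper showing the new calling convention.
BuildOutput summarize(ref<Store> destStore, ref<Store> localStore,
    const StorePath & drvPath, NarMemberDatas & narMembers)
{
    // Resolution now happens at the call site, which matters for CA
    // derivations where only the store knows the final output paths.
    OutputPathMap outputs = destStore->queryDerivationOutputMap(drvPath, &*localStore);
    return getBuildOutput(destStore, narMembers, outputs);
}
```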
```diff
@@ -98,8 +98,13 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
        it). */
     BuildID buildId;
     std::optional<StorePath> buildDrvPath;
-    unsigned int maxSilentTime, buildTimeout;
-    unsigned int repeats = step->isDeterministic ? 1 : 0;
+    // Other fields set below
+    nix::ServeProto::BuildOptions buildOptions {
+        .maxLogSize = maxLogSize,
+        .nrRepeats = step->isDeterministic ? 1u : 0u,
+        .enforceDeterminism = step->isDeterministic,
+        .keepFailed = false,
+    };
 
     auto conn(dbPool.get());
 
@@ -134,18 +139,18 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
             {
                 auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName));
                 if (i != jobsetRepeats.end())
-                    repeats = std::max(repeats, i->second);
+                    buildOptions.nrRepeats = std::max(buildOptions.nrRepeats, i->second);
             }
         }
         if (!build) build = *dependents.begin();
 
         buildId = build->id;
         buildDrvPath = build->drvPath;
-        maxSilentTime = build->maxSilentTime;
-        buildTimeout = build->buildTimeout;
+        buildOptions.maxSilentTime = build->maxSilentTime;
+        buildOptions.buildTimeout = build->buildTimeout;
 
         printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)",
-            localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1));
+            localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->sshName, buildId, (dependents.size() - 1));
     }
 
     if (!buildOneDone)
@@ -206,7 +211,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
 
         try {
             /* FIXME: referring builds may have conflicting timeouts. */
-            buildRemote(destStore, machine, step, maxSilentTime, buildTimeout, repeats, result, activeStep, updateStep, narMembers);
+            buildRemote(destStore, machine, step, buildOptions, result, activeStep, updateStep, narMembers);
         } catch (Error & e) {
             if (activeStep->state_.lock()->cancelled) {
                 printInfo("marking step %d of build %d as cancelled", stepNr, buildId);
@@ -221,7 +226,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
 
         if (result.stepStatus == bsSuccess) {
             updateStep(ssPostProcessing);
-            res = getBuildOutput(destStore, narMembers, *step->drv);
+            res = getBuildOutput(destStore, narMembers, destStore->queryDerivationOutputMap(step->drvPath, &*localStore));
         }
     }
 
@@ -275,9 +280,12 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
 
         assert(stepNr);
 
-        for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
-            if (i.second.second)
-                addRoot(*i.second.second);
+        for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(step->drvPath, &*localStore)) {
+            if (!optOutputPath)
+                throw Error(
+                    "Missing output %s for derivation %d which was supposed to have succeeded",
+                    outputName, localStore->printStorePath(step->drvPath));
+            addRoot(*optOutputPath);
         }
 
         /* Register success in the database for all Build objects that
@@ -398,7 +406,7 @@ void State::failStep(
     Step::ptr step,
     BuildID buildId,
     const RemoteResult & result,
-    Machine::ptr machine,
+    ::Machine::ptr machine,
     bool & stepFinished)
 {
     /* Register failure in the database for all Build objects that
```
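The loose `maxSilentTime`/`buildTimeout`/`repeats` parameters are folded into a single `nix::ServeProto::BuildOptions` aggregate. A sketch of the construction pattern; it sets exactly the fields this hunk sets (anything else would be an assumption about the struct's layout), and assigns the per-build timeouts afterwards, mirroring the hunk:

```cpp
#include "serve-protocol.hh"   // nix::ServeProto::BuildOptions

// Hypothetical helper: bundle the per-step build options in one place.
nix::ServeProto::BuildOptions makeOptions(
    bool deterministic, unsigned maxSilent, unsigned timeout, uint64_t maxLogSize)
{
    nix::ServeProto::BuildOptions opts {
        .maxLogSize = maxLogSize,
        .nrRepeats = deterministic ? 1u : 0u,   // re-run once to check determinism
        .enforceDeterminism = deterministic,
        .keepFailed = false,
    };
    // Timeouts come from the individual build, so set them afterwards.
    opts.maxSilentTime = maxSilent;
    opts.buildTimeout = timeout;
    return opts;
}
```

The design win is that `buildRemote()`'s signature stops growing every time the serve protocol learns a new knob.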
```diff
@@ -85,12 +85,113 @@ system_time State::doDispatch()
         }
     }
 
+    system_time now = std::chrono::system_clock::now();
+
     /* Start steps until we're out of steps or slots. */
     auto sleepUntil = system_time::max();
     bool keepGoing;
 
+    /* Sort the runnable steps by priority. Priority is establised
+       as follows (in order of precedence):
+
+       - The global priority of the builds that depend on the
+         step. This allows admins to bump a build to the front of
+         the queue.
+
+       - The lowest used scheduling share of the jobsets depending
+         on the step.
+
+       - The local priority of the build, as set via the build's
+         meta.schedulingPriority field. Note that this is not
+         quite correct: the local priority should only be used to
+         establish priority between builds in the same jobset, but
+         here it's used between steps in different jobsets if they
+         happen to have the same lowest used scheduling share. But
+         that's not very likely.
+
+       - The lowest ID of the builds depending on the step;
+         i.e. older builds take priority over new ones.
+
+       FIXME: O(n lg n); obviously, it would be better to keep a
+       runnable queue sorted by priority. */
+    struct StepInfo
+    {
+        Step::ptr step;
+        bool alreadyScheduled = false;
+
+        /* The lowest share used of any jobset depending on this
+           step. */
+        double lowestShareUsed = 1e9;
+
+        /* Info copied from step->state to ensure that the
+           comparator is a partial ordering (see MachineInfo). */
+        int highestGlobalPriority;
+        int highestLocalPriority;
+        BuildID lowestBuildID;
+
+        StepInfo(Step::ptr step, Step::State & step_) : step(step)
+        {
+            for (auto & jobset : step_.jobsets)
+                lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
+            highestGlobalPriority = step_.highestGlobalPriority;
+            highestLocalPriority = step_.highestLocalPriority;
+            lowestBuildID = step_.lowestBuildID;
+        }
+    };
+
+    std::vector<StepInfo> runnableSorted;
+
+    struct RunnablePerType
+    {
+        unsigned int count{0};
+        std::chrono::seconds waitTime{0};
+    };
+
+    std::unordered_map<std::string, RunnablePerType> runnablePerType;
+
+    {
+        auto runnable_(runnable.lock());
+        runnableSorted.reserve(runnable_->size());
+        for (auto i = runnable_->begin(); i != runnable_->end(); ) {
+            auto step = i->lock();
+
+            /* Remove dead steps. */
+            if (!step) {
+                i = runnable_->erase(i);
+                continue;
+            }
+
+            ++i;
+
+            auto & r = runnablePerType[step->systemType];
+            r.count++;
+
+            /* Skip previously failed steps that aren't ready
+               to be retried. */
+            auto step_(step->state.lock());
+            r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
+            if (step_->tries > 0 && step_->after > now) {
+                if (step_->after < sleepUntil)
+                    sleepUntil = step_->after;
+                continue;
+            }
+
+            runnableSorted.emplace_back(step, *step_);
+        }
+    }
+
+    sort(runnableSorted.begin(), runnableSorted.end(),
+        [](const StepInfo & a, const StepInfo & b)
+        {
+            return
+                a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
+                a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
+                a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
+                a.lowestBuildID < b.lowestBuildID;
+        });
+
     do {
-        system_time now = std::chrono::system_clock::now();
+        now = std::chrono::system_clock::now();
 
         /* Copy the currentJobs field of each machine. This is
            necessary to ensure that the sort comparator below is
@@ -98,7 +199,7 @@ system_time State::doDispatch()
            filter out temporarily disabled machines. */
         struct MachineInfo
         {
-            Machine::ptr machine;
+            ::Machine::ptr machine;
             unsigned long currentJobs;
         };
         std::vector<MachineInfo> machinesSorted;
@@ -138,104 +239,6 @@ system_time State::doDispatch()
                     a.currentJobs > b.currentJobs;
             });
 
-        /* Sort the runnable steps by priority. Priority is establised
-           as follows (in order of precedence):
-
-           - The global priority of the builds that depend on the
-             step. This allows admins to bump a build to the front of
-             the queue.
-
-           - The lowest used scheduling share of the jobsets depending
-             on the step.
-
-           - The local priority of the build, as set via the build's
-             meta.schedulingPriority field. Note that this is not
-             quite correct: the local priority should only be used to
-             establish priority between builds in the same jobset, but
-             here it's used between steps in different jobsets if they
-             happen to have the same lowest used scheduling share. But
-             that's not very likely.
-
-           - The lowest ID of the builds depending on the step;
-             i.e. older builds take priority over new ones.
-
-           FIXME: O(n lg n); obviously, it would be better to keep a
-           runnable queue sorted by priority. */
-        struct StepInfo
-        {
-            Step::ptr step;
-
-            /* The lowest share used of any jobset depending on this
-               step. */
-            double lowestShareUsed = 1e9;
-
-            /* Info copied from step->state to ensure that the
-               comparator is a partial ordering (see MachineInfo). */
-            int highestGlobalPriority;
-            int highestLocalPriority;
-            BuildID lowestBuildID;
-
-            StepInfo(Step::ptr step, Step::State & step_) : step(step)
-            {
-                for (auto & jobset : step_.jobsets)
-                    lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
-                highestGlobalPriority = step_.highestGlobalPriority;
-                highestLocalPriority = step_.highestLocalPriority;
-                lowestBuildID = step_.lowestBuildID;
-            }
-        };
-
-        std::vector<StepInfo> runnableSorted;
-
-        struct RunnablePerType
-        {
-            unsigned int count{0};
-            std::chrono::seconds waitTime{0};
-        };
-
-        std::unordered_map<std::string, RunnablePerType> runnablePerType;
-
-        {
-            auto runnable_(runnable.lock());
-            runnableSorted.reserve(runnable_->size());
-            for (auto i = runnable_->begin(); i != runnable_->end(); ) {
-                auto step = i->lock();
-
-                /* Remove dead steps. */
-                if (!step) {
-                    i = runnable_->erase(i);
-                    continue;
-                }
-
-                ++i;
-
-                auto & r = runnablePerType[step->systemType];
-                r.count++;
-
-                /* Skip previously failed steps that aren't ready
-                   to be retried. */
-                auto step_(step->state.lock());
-                r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
-                if (step_->tries > 0 && step_->after > now) {
-                    if (step_->after < sleepUntil)
-                        sleepUntil = step_->after;
-                    continue;
-                }
-
-                runnableSorted.emplace_back(step, *step_);
-            }
-        }
-
-        sort(runnableSorted.begin(), runnableSorted.end(),
-            [](const StepInfo & a, const StepInfo & b)
-            {
-                return
-                    a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
-                    a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
-                    a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
-                    a.lowestBuildID < b.lowestBuildID;
-            });
-
         /* Find a machine with a free slot and find a step to run
            on it. Once we find such a pair, we restart the outer
            loop because the machine sorting will have changed. */
@@ -245,6 +248,8 @@ system_time State::doDispatch()
             if (mi.machine->state->currentJobs >= mi.machine->maxJobs) continue;
 
             for (auto & stepInfo : runnableSorted) {
+                if (stepInfo.alreadyScheduled) continue;
+
                 auto & step(stepInfo.step);
 
                 /* Can this machine do this step? */
@@ -271,6 +276,8 @@ system_time State::doDispatch()
                     r.count--;
                 }
 
+                stepInfo.alreadyScheduled = true;
+
                 /* Make a slot reservation and start a thread to
                    do the build. */
                 auto builderThread = std::thread(&State::builder, this,
@@ -428,7 +435,7 @@ void Jobset::pruneSteps()
 }
 
 
-State::MachineReservation::MachineReservation(State & state, Step::ptr step, Machine::ptr machine)
+State::MachineReservation::MachineReservation(State & state, Step::ptr step, ::Machine::ptr machine)
     : state(state), step(step), machine(machine)
 {
     machine->state->currentJobs++;
```
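The `/* Info copied from step->state ... */` comment is really about `std::sort`'s strict-weak-ordering contract: if the comparator read live, concurrently mutated state, a sort key could change mid-sort, which is undefined behavior. A reduced sketch of the snapshot-then-sort pattern, with illustrative types:

```cpp
#include <algorithm>
#include <vector>

// Keys are copied once per dispatch round and never mutated during the
// sort, so every comparison of the same two elements gives the same answer.
struct Snapshot {
    int globalPriority;   // immutable copy of shared, mutable state
    unsigned buildId;
};

void sortQueue(std::vector<Snapshot> & q)
{
    std::sort(q.begin(), q.end(), [](const Snapshot & a, const Snapshot & b) {
        return a.globalPriority != b.globalPriority
            ? a.globalPriority > b.globalPriority   // higher priority first
            : a.buildId < b.buildId;                // then older builds first
    });
}
```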
```diff
@@ -36,10 +36,12 @@ struct BuildOutput
 
     std::list<BuildProduct> products;
 
+    std::map<std::string, nix::StorePath> outputs;
+
     std::map<std::string, BuildMetric> metrics;
 };
 
 BuildOutput getBuildOutput(
     nix::ref<nix::Store> store,
     NarMemberDatas & narMembers,
-    const nix::Derivation & drv);
+    const nix::OutputPathMap derivationOutputs);
```
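With `outputs` recorded on the result itself, consumers of a `BuildOutput` no longer need a `Derivation` in hand to know what was produced. A trivial hedged sketch (the helper is hypothetical):

```cpp
#include <iostream>
#include "hydra-build-result.hh"   // BuildOutput
#include "store-api.hh"

// Hypothetical helper: report what a finished build produced.
void printOutputs(const BuildOutput & res, nix::Store & store)
{
    for (auto & [name, path] : res.outputs)
        std::cout << name << " -> " << store.printStorePath(path) << "\n";
}
```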
```diff
@@ -1,6 +1,7 @@
 #include <iostream>
 #include <thread>
 #include <optional>
+#include <type_traits>
 
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -10,6 +11,7 @@
 
 #include <nlohmann/json.hpp>
 
+#include "signals.hh"
 #include "state.hh"
 #include "hydra-build-result.hh"
 #include "store-api.hh"
@@ -139,23 +141,41 @@ void State::parseMachines(const std::string & contents)
         if (tokens.size() < 3) continue;
         tokens.resize(8);
 
-        auto machine = std::make_shared<Machine>();
-        machine->sshName = tokens[0];
-        machine->systemTypes = tokenizeString<StringSet>(tokens[1], ",");
-        machine->sshKey = tokens[2] == "-" ? std::string("") : tokens[2];
-        if (tokens[3] != "")
-            machine->maxJobs = string2Int<decltype(machine->maxJobs)>(tokens[3]).value();
-        else
-            machine->maxJobs = 1;
-        machine->speedFactor = atof(tokens[4].c_str());
         if (tokens[5] == "-") tokens[5] = "";
-        machine->supportedFeatures = tokenizeString<StringSet>(tokens[5], ",");
+        auto supportedFeatures = tokenizeString<StringSet>(tokens[5], ",");
 
         if (tokens[6] == "-") tokens[6] = "";
-        machine->mandatoryFeatures = tokenizeString<StringSet>(tokens[6], ",");
-        for (auto & f : machine->mandatoryFeatures)
-            machine->supportedFeatures.insert(f);
-        if (tokens[7] != "" && tokens[7] != "-")
-            machine->sshPublicHostKey = base64Decode(tokens[7]);
+        auto mandatoryFeatures = tokenizeString<StringSet>(tokens[6], ",");
+        for (auto & f : mandatoryFeatures)
+            supportedFeatures.insert(f);
+
+        using MaxJobs = std::remove_const<decltype(nix::Machine::maxJobs)>::type;
+
+        auto machine = std::make_shared<::Machine>(nix::Machine {
+            // `storeUri`, not yet used
+            "",
+            // `systemTypes`
+            tokenizeString<StringSet>(tokens[1], ","),
+            // `sshKey`
+            tokens[2] == "-" ? "" : tokens[2],
+            // `maxJobs`
+            tokens[3] != ""
+                ? string2Int<MaxJobs>(tokens[3]).value()
+                : 1,
+            // `speedFactor`
+            atof(tokens[4].c_str()),
+            // `supportedFeatures`
+            std::move(supportedFeatures),
+            // `mandatoryFeatures`
+            std::move(mandatoryFeatures),
+            // `sshPublicHostKey`
+            tokens[7] != "" && tokens[7] != "-"
+                ? base64Decode(tokens[7])
+                : "",
+        });
+
+        machine->sshName = tokens[0];
 
         /* Re-use the State object of the previous machine with the
            same name. */
```
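The machine is now built by positionally initializing the `nix::Machine` base, with a comment labelling each argument. A generic illustration of that pattern with hypothetical types, since the exact `nix::Machine` field list varies by Nix version:

```cpp
#include <set>
#include <string>

// Stand-in for a library aggregate that must be initialized positionally.
struct BaseMachine {
    std::string storeUri;
    std::set<std::string> systemTypes;
    std::string sshKey;
    unsigned maxJobs;
    float speedFactor;
};

// Local extension carrying extra, queue-runner-specific state.
struct QueueMachine : BaseMachine {
    std::string sshName;
};

QueueMachine makeMachine()
{
    QueueMachine m {
        {
            // `storeUri`
            "",
            // `systemTypes`
            {"x86_64-linux"},
            // `sshKey`
            "",
            // `maxJobs`
            1,
            // `speedFactor`
            1.0f,
        },
    };
    m.sshName = "localhost";   // fields the base type doesn't know about
    return m;
}
```

Labelling each positional argument with the field name keeps the call site reviewable even though designated initializers can't be used for a base-class subobject.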
```diff
@@ -165,7 +185,7 @@ void State::parseMachines(const std::string & contents)
         else
             printMsg(lvlChatty, "updating machine ‘%1%’", machine->sshName);
         machine->state = i == oldMachines.end()
-            ? std::make_shared<Machine::State>()
+            ? std::make_shared<::Machine::State>()
            : i->second->state;
         newMachines[machine->sshName] = machine;
     }
@@ -174,9 +194,9 @@ void State::parseMachines(const std::string & contents)
         if (newMachines.find(m.first) == newMachines.end()) {
             if (m.second->enabled)
                 printInfo("removing machine ‘%1%’", m.first);
-            /* Add a disabled Machine object to make sure stats are
+            /* Add a disabled ::Machine object to make sure stats are
                maintained. */
-            auto machine = std::make_shared<Machine>(*(m.second));
+            auto machine = std::make_shared<::Machine>(*(m.second));
             machine->enabled = false;
             newMachines[m.first] = machine;
         }
@@ -204,7 +224,7 @@ void State::monitorMachinesFile()
             parseMachines("localhost " +
                 (settings.thisSystem == "x86_64-linux" ? "x86_64-linux,i686-linux" : settings.thisSystem.get())
                 + " - " + std::to_string(settings.maxBuildJobs) + " 1 "
-                + concatStringsSep(",", settings.systemFeatures.get()));
+                + concatStringsSep(",", StoreConfig::getDefaultSystemFeatures()));
             machinesReadyLock.unlock();
             return;
         }
@@ -311,10 +331,13 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID
 
     if (r.affected_rows() == 0) goto restart;
 
-    for (auto & [name, output] : step->drv->outputs)
+    for (auto & [name, output] : getDestStore()->queryPartialDerivationOutputMap(step->drvPath, &*localStore))
         txn.exec_params0
             ("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
-             buildId, stepNr, name, localStore->printStorePath(*output.path(*localStore, step->drv->name, name)));
+             buildId, stepNr, name,
+             output
+                 ? std::optional { localStore->printStorePath(*output)}
+                 : std::nullopt);
 
     if (status == bsBusy)
         txn.exec(fmt("notify step_started, '%d\t%d'", buildId, stepNr));
```
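The insert above now binds `path` as a nullable parameter: as used here, libpqxx maps an empty `std::optional` to SQL `NULL`, so one statement covers both the known-path case and the not-yet-known (content-addressed) case. A sketch, reusing the table from the hunk but with a hypothetical wrapper function:

```cpp
#include <optional>
#include <string>
#include <pqxx/pqxx>

// path == std::nullopt is sent to PostgreSQL as NULL.
void insertOutput(pqxx::work & txn, int buildId, int stepNr,
    const std::string & name, const std::optional<std::string> & path)
{
    txn.exec_params0(
        "insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
        buildId, stepNr, name, path);
}
```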
```diff
@@ -351,11 +374,23 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
     assert(result.logFile.find('\t') == std::string::npos);
     txn.exec(fmt("notify step_finished, '%d\t%d\t%s'",
             buildId, stepNr, result.logFile));
+
+    if (result.stepStatus == bsSuccess) {
+        // Update the corresponding `BuildStepOutputs` row to add the output path
+        auto res = txn.exec_params1("select drvPath from BuildSteps where build = $1 and stepnr = $2", buildId, stepNr);
+        assert(res.size());
+        StorePath drvPath = localStore->parseStorePath(res[0].as<std::string>());
+        // If we've finished building, all the paths should be known
+        for (auto & [name, output] : getDestStore()->queryDerivationOutputMap(drvPath, &*localStore))
+            txn.exec_params0
+                ("update BuildStepOutputs set path = $4 where build = $1 and stepnr = $2 and name = $3",
+                 buildId, stepNr, name, localStore->printStorePath(output));
+    }
 }
 
 
 int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
-    Build::ptr build, const StorePath & drvPath, const std::string & outputName, const StorePath & storePath)
+    Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath)
 {
 restart:
     auto stepNr = allocBuildStep(txn, build->id);
@@ -456,6 +491,15 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
         res.releaseName != "" ? std::make_optional(res.releaseName) : std::nullopt,
         isCachedBuild ? 1 : 0);
 
+    for (auto & [outputName, outputPath] : res.outputs) {
+        txn.exec_params0
+            ("update BuildOutputs set path = $3 where build = $1 and name = $2",
+             build->id,
+             outputName,
+             localStore->printStorePath(outputPath)
+            );
+    }
+
     txn.exec_params0("delete from BuildProducts where build = $1", build->id);
 
     unsigned int productNr = 1;
@@ -467,7 +511,7 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
          product.type,
          product.subtype,
          product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt,
-         product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base16, false)) : std::nullopt,
+         product.sha256hash ? std::make_optional(product.sha256hash->to_string(HashFormat::Base16, false)) : std::nullopt,
          product.path,
          product.name,
          product.defaultPath);
@@ -882,10 +926,17 @@ void State::run(BuildID buildOne)
     while (true) {
         try {
             auto conn(dbPool.get());
-            receiver dumpStatus_(*conn, "dump_status");
-            while (true) {
-                conn->await_notification();
-                dumpStatus(*conn);
+            try {
+                receiver dumpStatus_(*conn, "dump_status");
+                while (true) {
+                    conn->await_notification();
+                    dumpStatus(*conn);
+                }
+            } catch (pqxx::broken_connection & connEx) {
+                printMsg(lvlError, "main thread: %s", connEx.what());
+                printMsg(lvlError, "main thread: Reconnecting in 10s");
+                conn.markBad();
+                sleep(10);
             }
         } catch (std::exception & e) {
             printMsg(lvlError, "main thread: %s", e.what());
```
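The main thread (above) and the queue monitor (later in this compare) now share a reconnect idiom: catch the transport failure, `pqxx::broken_connection`, separately from generic errors, discard the bad connection, back off, and loop to reconnect. The skeleton, reduced to a sketch using a plain `pqxx::connection` rather than Hydra's connection pool:

```cpp
#include <iostream>
#include <string>
#include <pqxx/pqxx>
#include <unistd.h>   // sleep()

void runForever(const std::string & dbUrl)
{
    while (true) {
        try {
            pqxx::connection conn(dbUrl);     // stand-in for a pooled connection
            while (true) {
                pqxx::work txn(conn);
                txn.exec("select 1");         // placeholder for real work
                txn.commit();
                sleep(1);
            }
        } catch (pqxx::broken_connection & e) {
            std::cerr << "lost connection: " << e.what() << "; retrying in 10s\n";
            sleep(10);                        // back off, then reconnect
        }
    }
}
```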
```diff
@@ -6,7 +6,46 @@
 
 using namespace nix;
 
-struct Extractor : ParseSink
+struct NarMemberConstructor : CreateRegularFileSink
+{
+    NarMemberData & curMember;
+
+    HashSink hashSink = HashSink { HashAlgorithm::SHA256 };
+
+    std::optional<uint64_t> expectedSize;
+
+    NarMemberConstructor(NarMemberData & curMember)
+        : curMember(curMember)
+    { }
+
+    void isExecutable() override
+    {
+    }
+
+    void preallocateContents(uint64_t size) override
+    {
+        expectedSize = size;
+    }
+
+    void operator () (std::string_view data) override
+    {
+        assert(expectedSize);
+        *curMember.fileSize += data.size();
+        hashSink(data);
+        if (curMember.contents) {
+            curMember.contents->append(data);
+        }
+        assert(curMember.fileSize <= expectedSize);
+        if (curMember.fileSize == expectedSize) {
+            auto [hash, len] = hashSink.finish();
+            assert(curMember.fileSize == len);
+            curMember.sha256 = hash;
+        }
+    }
+};
+
+struct Extractor : FileSystemObjectSink
 {
     std::unordered_set<Path> filesToKeep {
         "/nix-support/hydra-build-products",
@@ -15,7 +54,6 @@ struct Extractor : ParseSink
     };
 
     NarMemberDatas & members;
-    NarMemberData * curMember = nullptr;
     Path prefix;
 
     Extractor(NarMemberDatas & members, const Path & prefix)
@@ -24,49 +62,24 @@ struct Extractor : ParseSink
 
     void createDirectory(const Path & path) override
     {
-        members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tDirectory });
+        members.insert_or_assign(prefix + path, NarMemberData { .type = SourceAccessor::Type::tDirectory });
     }
 
-    void createRegularFile(const Path & path) override
+    void createRegularFile(const Path & path, std::function<void(CreateRegularFileSink &)> func) override
     {
-        curMember = &members.insert_or_assign(prefix + path, NarMemberData {
-            .type = FSAccessor::Type::tRegular,
-            .fileSize = 0,
-            .contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
-        }).first->second;
-    }
-
-    std::optional<uint64_t> expectedSize;
-    std::unique_ptr<HashSink> hashSink;
-
-    void preallocateContents(uint64_t size) override
-    {
-        expectedSize = size;
-        hashSink = std::make_unique<HashSink>(htSHA256);
-    }
-
-    void receiveContents(std::string_view data) override
-    {
-        assert(expectedSize);
-        assert(curMember);
-        assert(hashSink);
-        *curMember->fileSize += data.size();
-        (*hashSink)(data);
-        if (curMember->contents) {
-            curMember->contents->append(data);
-        }
-        assert(curMember->fileSize <= expectedSize);
-        if (curMember->fileSize == expectedSize) {
-            auto [hash, len] = hashSink->finish();
-            assert(curMember->fileSize == len);
-            curMember->sha256 = hash;
-            hashSink.reset();
-        }
+        NarMemberConstructor nmc {
+            members.insert_or_assign(prefix + path, NarMemberData {
+                .type = SourceAccessor::Type::tRegular,
+                .fileSize = 0,
+                .contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
+            }).first->second,
+        };
+        func(nmc);
     }
 
     void createSymlink(const Path & path, const std::string & target) override
     {
-        members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tSymlink });
+        members.insert_or_assign(prefix + path, NarMemberData { .type = SourceAccessor::Type::tSymlink });
     }
 };
 
```
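The rewritten `Extractor` targets a callback-style sink API: per-file state lives in a short-lived object passed to a closure, rather than in a long-lived `curMember` pointer that every method must validate. A toy model of the shape of that API, with simplified stand-ins rather than nix's actual classes:

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <string_view>

// Simplified stand-in for CreateRegularFileSink.
struct RegularFileSink {
    virtual void operator()(std::string_view data) = 0;
    virtual ~RegularFileSink() = default;
};

struct ByteCounter : RegularFileSink {
    uint64_t & total;
    explicit ByteCounter(uint64_t & total) : total(total) {}
    void operator()(std::string_view data) override { total += data.size(); }
};

// The parser hands us a callback instead of letting us keep a "current
// member" pointer: per-file state exists only for the duration of func().
struct Sizes {
    std::map<std::string, uint64_t> bySize;
    void createRegularFile(const std::string & path,
        std::function<void(RegularFileSink &)> func)
    {
        ByteCounter sink(bySize[path]);
        func(sink);
    }
};

int main() {
    Sizes s;
    s.createRegularFile("/out", [](RegularFileSink & sink) {
        sink("hello ");
        sink("world");
    });
    std::cout << s.bySize["/out"] << "\n";   // 11
}
```

Scoping the state to the callback is what lets the diff delete the `assert(curMember)`-style defensive checks.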
```diff
@@ -1,13 +1,13 @@
 #pragma once
 
-#include "fs-accessor.hh"
+#include "source-accessor.hh"
 #include "types.hh"
 #include "serialise.hh"
 #include "hash.hh"
 
 struct NarMemberData
 {
-    nix::FSAccessor::Type type;
+    nix::SourceAccessor::Type type;
     std::optional<uint64_t> fileSize;
     std::optional<std::string> contents;
     std::optional<nix::Hash> sha256;
```
```diff
@@ -10,8 +10,14 @@ using namespace nix;
 void State::queueMonitor()
 {
     while (true) {
+        auto conn(dbPool.get());
         try {
-            queueMonitorLoop();
+            queueMonitorLoop(*conn);
+        } catch (pqxx::broken_connection & e) {
+            printMsg(lvlError, "queue monitor: %s", e.what());
+            printMsg(lvlError, "queue monitor: Reconnecting in 10s");
+            conn.markBad();
+            sleep(10);
         } catch (std::exception & e) {
             printError("queue monitor: %s", e.what());
             sleep(10); // probably a DB problem, so don't retry right away
@@ -20,16 +26,14 @@ void State::queueMonitor()
 }
 
 
-void State::queueMonitorLoop()
+void State::queueMonitorLoop(Connection & conn)
 {
-    auto conn(dbPool.get());
-
-    receiver buildsAdded(*conn, "builds_added");
-    receiver buildsRestarted(*conn, "builds_restarted");
-    receiver buildsCancelled(*conn, "builds_cancelled");
-    receiver buildsDeleted(*conn, "builds_deleted");
-    receiver buildsBumped(*conn, "builds_bumped");
-    receiver jobsetSharesChanged(*conn, "jobset_shares_changed");
+    receiver buildsAdded(conn, "builds_added");
+    receiver buildsRestarted(conn, "builds_restarted");
+    receiver buildsCancelled(conn, "builds_cancelled");
+    receiver buildsDeleted(conn, "builds_deleted");
+    receiver buildsBumped(conn, "builds_bumped");
+    receiver jobsetSharesChanged(conn, "jobset_shares_changed");
 
     auto destStore = getDestStore();
 
@@ -39,17 +43,17 @@ void State::queueMonitorLoop()
     while (!quit) {
         localStore->clearPathInfoCache();
 
-        bool done = getQueuedBuilds(*conn, destStore, lastBuildId);
+        bool done = getQueuedBuilds(conn, destStore, lastBuildId);
 
         if (buildOne && buildOneDone) quit = true;
 
         /* Sleep until we get notification from the database about an
            event. */
         if (done && !quit) {
-            conn->await_notification();
+            conn.await_notification();
             nrQueueWakeups++;
         } else
-            conn->get_notifs();
+            conn.get_notifs();
 
         if (auto lowestId = buildsAdded.get()) {
             lastBuildId = std::min(lastBuildId, static_cast<unsigned>(std::stoul(*lowestId) - 1));
@@ -61,11 +65,11 @@ void State::queueMonitorLoop()
         }
         if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
             printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
-            processQueueChange(*conn);
+            processQueueChange(conn);
         }
         if (jobsetSharesChanged.get()) {
             printMsg(lvlTalkative, "got notification: jobset shares changed");
-            processJobsetSharesChange(*conn);
+            processJobsetSharesChange(conn);
         }
     }
 
@@ -192,15 +196,19 @@ bool State::getQueuedBuilds(Connection & conn,
         if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();
 
         if (!propagatedFrom) {
-            for (auto & i : ex.step->drv->outputsAndOptPaths(*localStore)) {
-                if (i.second.second) {
-                    auto res = txn.exec_params
-                        ("select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where path = $1 and startTime != 0 and stopTime != 0 and status = 1",
-                         localStore->printStorePath(*i.second.second));
-                    if (!res[0][0].is_null()) {
-                        propagatedFrom = res[0][0].as<BuildID>();
-                        break;
-                    }
+            for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(ex.step->drvPath, &*localStore)) {
+                constexpr std::string_view common = "select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where startTime != 0 and stopTime != 0 and status = 1";
+                auto res = optOutputPath
+                    ? txn.exec_params(
+                        std::string { common } + " and path = $1",
+                        localStore->printStorePath(*optOutputPath))
+                    : txn.exec_params(
+                        std::string { common } + " and drvPath = $1 and name = $2",
+                        localStore->printStorePath(ex.step->drvPath),
+                        outputName);
+                if (!res[0][0].is_null()) {
+                    propagatedFrom = res[0][0].as<BuildID>();
+                    break;
                 }
             }
         }
@@ -236,12 +244,10 @@ bool State::getQueuedBuilds(Connection & conn,
         /* If we didn't get a step, it means the step's outputs are
            all valid. So we mark this as a finished, cached build. */
         if (!step) {
-            auto drv = localStore->readDerivation(build->drvPath);
-            BuildOutput res = getBuildOutputCached(conn, destStore, drv);
+            BuildOutput res = getBuildOutputCached(conn, destStore, build->drvPath);
 
-            for (auto & i : drv.outputsAndOptPaths(*localStore))
-                if (i.second.second)
-                    addRoot(*i.second.second);
+            for (auto & i : destStore->queryDerivationOutputMap(build->drvPath, &*localStore))
+                addRoot(i.second);
 
             {
                 auto mc = startDbUpdate();
@@ -292,7 +298,7 @@ bool State::getQueuedBuilds(Connection & conn,
         try {
             createBuild(build);
         } catch (Error & e) {
-            e.addTrace({}, hintfmt("while loading build %d: ", build->id));
+            e.addTrace({}, HintFmt("while loading build %d: ", build->id));
             throw;
         }
 
@@ -315,7 +321,7 @@ bool State::getQueuedBuilds(Connection & conn,
         if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) {
             prom.queue_checks_early_exits.Increment();
             break;
         }
     }
 
     prom.queue_checks_finished.Increment();
@@ -464,10 +470,7 @@ Step::ptr State::createStep(ref<Store> destStore,
 
     step->systemType = step->drv->platform;
     {
-        auto i = step->drv->env.find("requiredSystemFeatures");
-        StringSet features;
-        if (i != step->drv->env.end())
-            features = step->requiredSystemFeatures = tokenizeString<std::set<std::string>>(i->second);
+        StringSet features = step->requiredSystemFeatures = step->parsedDrv->getRequiredSystemFeatures();
         if (step->preferLocalBuild)
             features.insert("local");
         if (!features.empty()) {
@@ -481,26 +484,41 @@ Step::ptr State::createStep(ref<Store> destStore,
         throw PreviousFailure{step};
 
     /* Are all outputs valid? */
+    auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
     bool valid = true;
-    DerivationOutputs missing;
-    for (auto & i : step->drv->outputs)
-        if (!destStore->isValidPath(*i.second.path(*localStore, step->drv->name, i.first))) {
-            valid = false;
-            missing.insert_or_assign(i.first, i.second);
-        }
+    std::map<DrvOutput, std::optional<StorePath>> missing;
+    for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
+        auto outputHash = outputHashes.at(outputName);
+        if (maybeOutputPath && destStore->isValidPath(*maybeOutputPath))
+            continue;
+        valid = false;
+        missing.insert({{outputHash, outputName}, maybeOutputPath});
+    }
 
     /* Try to copy the missing paths from the local store or from
        substitutes. */
     if (!missing.empty()) {
 
         size_t avail = 0;
-        for (auto & i : missing) {
-            auto path = i.second.path(*localStore, step->drv->name, i.first);
-            if (/* localStore != destStore && */ localStore->isValidPath(*path))
+        for (auto & [i, pathOpt] : missing) {
+            // If we don't know the output path from the destination
+            // store, see if the local store can tell us.
+            if (/* localStore != destStore && */ !pathOpt && experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
+                if (auto maybeRealisation = localStore->queryRealisation(i))
+                    pathOpt = maybeRealisation->outPath;
+
+            if (!pathOpt) {
+                // No hope of getting the store object if we don't know
+                // the path.
+                continue;
+            }
+            auto & path = *pathOpt;
+
+            if (/* localStore != destStore && */ localStore->isValidPath(path))
                 avail++;
             else if (useSubstitutes) {
                 SubstitutablePathInfos infos;
-                localStore->querySubstitutablePathInfos({{*path, {}}}, infos);
+                localStore->querySubstitutablePathInfos({{path, {}}}, infos);
                 if (infos.size() == 1)
                     avail++;
             }
```
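For content-addressed derivations the destination store may not know an output path yet, so the code above falls back to the local store's realisation database. A sketch of that lookup, using `DrvOutput`/`queryRealisation()` as they appear in the hunk (the header name is an assumption):

```cpp
#include "store-api.hh"
#include "realisation.hh"   // nix::DrvOutput, nix::Realisation (assumed location)

using namespace nix;

// Hypothetical helper: map (drv output hash, output name) to a store path,
// if the realisation database has seen this output being built before.
std::optional<StorePath> resolveOutput(
    Store & localStore, const Hash & drvHash, const std::string & outputName)
{
    DrvOutput id { drvHash, outputName };
    if (auto realisation = localStore.queryRealisation(id))
        return realisation->outPath;
    return std::nullopt;   // path genuinely unknown; caller must skip it
}
```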
```diff
@@ -508,26 +526,29 @@ Step::ptr State::createStep(ref<Store> destStore,
 
         if (missing.size() == avail) {
             valid = true;
-            for (auto & i : missing) {
-                auto path = i.second.path(*localStore, step->drv->name, i.first);
+            for (auto & [i, pathOpt] : missing) {
+                // If we found everything, then we should know the path
+                // to every missing store object now.
+                assert(pathOpt);
+                auto & path = *pathOpt;
+
                 try {
                     time_t startTime = time(0);
 
-                    if (localStore->isValidPath(*path))
+                    if (localStore->isValidPath(path))
                         printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
-                            localStore->printStorePath(*path),
+                            localStore->printStorePath(path),
                             localStore->printStorePath(drvPath));
                     else {
                         printInfo("substituting output ‘%1%’ of ‘%2%’",
-                            localStore->printStorePath(*path),
+                            localStore->printStorePath(path),
                             localStore->printStorePath(drvPath));
-                        localStore->ensurePath(*path);
+                        localStore->ensurePath(path);
                         // FIXME: should copy directly from substituter to destStore.
                     }
 
                     copyClosure(*localStore, *destStore,
-                        StorePathSet { *path },
+                        StorePathSet { path },
                         NoRepair, CheckSigs, NoSubstitute);
 
                     time_t stopTime = time(0);
@@ -535,13 +556,13 @@ Step::ptr State::createStep(ref<Store> destStore,
                     {
                         auto mc = startDbUpdate();
                         pqxx::work txn(conn);
-                        createSubstitutionStep(txn, startTime, stopTime, build, drvPath, "out", *path);
+                        createSubstitutionStep(txn, startTime, stopTime, build, drvPath, *(step->drv), "out", path);
                         txn.commit();
                     }
 
                 } catch (Error & e) {
                     printError("while copying/substituting output ‘%s’ of ‘%s’: %s",
-                        localStore->printStorePath(*path),
+                        localStore->printStorePath(path),
                         localStore->printStorePath(drvPath),
                         e.what());
                     valid = false;
@@ -561,7 +582,7 @@ Step::ptr State::createStep(ref<Store> destStore,
     printMsg(lvlDebug, "creating build step ‘%1%’", localStore->printStorePath(drvPath));
 
     /* Create steps for the dependencies. */
-    for (auto & i : step->drv->inputDrvs) {
+    for (auto & i : step->drv->inputDrvs.map) {
         auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
         if (dep) {
             auto step_(step->state.lock());
@@ -640,17 +661,19 @@ void State::processJobsetSharesChange(Connection & conn)
 }
 
 
-BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::Derivation & drv)
+BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::StorePath & drvPath)
 {
+    auto derivationOutputs = destStore->queryDerivationOutputMap(drvPath, &*localStore);
+
     {
         pqxx::work txn(conn);
 
-        for (auto & [name, output] : drv.outputsAndOptPaths(*localStore)) {
+        for (auto & [name, output] : derivationOutputs) {
             auto r = txn.exec_params
                 ("select id, buildStatus, releaseName, closureSize, size from Builds b "
                  "join BuildOutputs o on b.id = o.build "
                  "where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
-                 localStore->printStorePath(*output.second));
+                 localStore->printStorePath(output));
             if (r.empty()) continue;
             BuildID id = r[0][0].as<BuildID>();
 
@@ -677,7 +700,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
                 product.fileSize = row[2].as<off_t>();
             }
             if (!row[3].is_null())
-                product.sha256hash = Hash::parseAny(row[3].as<std::string>(), htSHA256);
+                product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashAlgorithm::SHA256);
             if (!row[4].is_null())
                 product.path = row[4].as<std::string>();
             product.name = row[5].as<std::string>();
@@ -704,5 +727,5 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
     }
 
     NarMemberDatas narMembers;
-    return getBuildOutput(destStore, narMembers, drv);
+    return getBuildOutput(destStore, narMembers, derivationOutputs);
 }
```
```diff
@@ -21,6 +21,9 @@
 #include "store-api.hh"
 #include "sync.hh"
 #include "nar-extractor.hh"
+#include "serve-protocol.hh"
+#include "serve-protocol-impl.hh"
+#include "machines.hh"
 
 
 typedef unsigned int BuildID;
@@ -78,6 +81,8 @@ struct RemoteResult
     {
         return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
     }
+
+    void updateWithBuildResult(const nix::BuildResult &);
 };
 
 
@@ -231,17 +236,13 @@ void getDependents(Step::ptr step, std::set<Build::ptr> & builds, std::set<Step:
 void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);
 
 
-struct Machine
+struct Machine : nix::Machine
 {
     typedef std::shared_ptr<Machine> ptr;
 
-    bool enabled{true};
-    std::string sshName, sshKey;
-    std::set<std::string> systemTypes, supportedFeatures, mandatoryFeatures;
-    unsigned int maxJobs = 1;
-    float speedFactor = 1.0;
-    std::string sshPublicHostKey;
+    /* TODO Get rid of: `nix::Machine::storeUri` is normalized in a way
+       we are not yet used to, but once we are, we don't need this. */
+    std::string sshName;
 
     struct State {
         typedef std::shared_ptr<State> ptr;
@@ -297,6 +298,12 @@ struct Machine
         std::regex r("^(ssh://|ssh-ng://)?localhost$");
         return std::regex_search(sshName, r);
     }
+
+    // A connection to a machine
+    struct Connection : nix::ServeProto::BasicClientConnection {
+        // Backpointer to the machine
+        ptr machine;
+    };
 };
 
 
@@ -430,7 +437,7 @@ private:
 
     /* How often the build steps of a jobset should be repeated in
        order to detect non-determinism. */
-    std::map<std::pair<std::string, std::string>, unsigned int> jobsetRepeats;
+    std::map<std::pair<std::string, std::string>, size_t> jobsetRepeats;
 
     bool uploadLogsToBinaryCache;
 
@@ -485,13 +492,13 @@ private:
         const std::string & machine);
 
     int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
-        Build::ptr build, const nix::StorePath & drvPath, const std::string & outputName, const nix::StorePath & storePath);
+        Build::ptr build, const nix::StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const nix::StorePath & storePath);
 
     void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);
 
     void queueMonitor();
 
-    void queueMonitorLoop();
+    void queueMonitorLoop(Connection & conn);
 
     /* Check the queue for new builds. */
     bool getQueuedBuilds(Connection & conn,
@@ -501,7 +508,7 @@ private:
     void processQueueChange(Connection & conn);
 
     BuildOutput getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore,
-        const nix::Derivation & drv);
+        const nix::StorePath & drvPath);
 
     Step::ptr createStep(nix::ref<nix::Store> store,
         Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
@@ -543,8 +550,7 @@ private:
 
     void buildRemote(nix::ref<nix::Store> destStore,
         Machine::ptr machine, Step::ptr step,
-        unsigned int maxSilentTime, unsigned int buildTimeout,
-        unsigned int repeats,
+        const nix::ServeProto::BuildOptions & buildOptions,
         RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
         std::function<void(StepState)> updateStep,
         NarMemberDatas & narMembers);
```
@@ -4,7 +4,6 @@ use strict;
 use warnings;
 use base 'Hydra::Base::Controller::REST';
 use List::SomeUtils qw(any);
-use Nix::Store;
 use Hydra::Helper::Nix;
 use Hydra::Helper::CatalystUtils;
 
@@ -30,7 +29,7 @@ sub getChannelData {
     my $outputs = {};
     foreach my $output (@outputs) {
         my $outPath = $output->get_column("outpath");
-        next if $checkValidity && !isValidPath($outPath);
+        next if $checkValidity && !$MACHINE_LOCAL_STORE->isValidPath($outPath);
         $outputs->{$output->get_column("outname")} = $outPath;
         push @storePaths, $outPath;
         # Put the system type in the manifest (for top-level
@@ -95,6 +95,7 @@ sub get_legacy_ldap_config {
             "hydra_bump-to-front" => [ "bump-to-front" ],
             "hydra_cancel-build" => [ "cancel-build" ],
             "hydra_create-projects" => [ "create-projects" ],
+            "hydra_eval-jobset" => [ "eval-jobset" ],
             "hydra_restart-jobs" => [ "restart-jobs" ],
         },
     };
@@ -159,6 +160,7 @@ sub valid_roles {
         "bump-to-front",
         "cancel-build",
         "create-projects",
+        "eval-jobset",
         "restart-jobs",
     ];
 }
@@ -239,6 +239,8 @@ sub triggerJobset {
 sub push : Chained('api') PathPart('push') Args(0) {
     my ($self, $c) = @_;
 
+    requirePost($c);
+
     $c->{stash}->{json}->{jobsetsTriggered} = [];
 
     my $force = exists $c->request->query_params->{force};
@@ -246,19 +248,24 @@ sub push : Chained('api') PathPart('push') Args(0) {
     foreach my $s (@jobsets) {
         my ($p, $j) = parseJobsetName($s);
         my $jobset = $c->model('DB::Jobsets')->find($p, $j);
+        requireEvalJobsetPrivileges($c, $jobset->project);
         next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
         triggerJobset($self, $c, $jobset, $force);
     }
 
     my @repos = split /,/, ($c->request->query_params->{repos} // "");
     foreach my $r (@repos) {
-        triggerJobset($self, $c, $_, $force) foreach $c->model('DB::Jobsets')->search(
+        my @jobsets = $c->model('DB::Jobsets')->search(
             { 'project.enabled' => 1, 'me.enabled' => 1 },
             {
                 join => 'project',
                 where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ],
                 order_by => 'me.id DESC'
             });
+        foreach my $jobset (@jobsets) {
+            requireEvalJobsetPrivileges($c, $jobset->project);
+            triggerJobset($self, $c, $jobset, $force)
+        }
     }
 
     $self->status_ok(
@@ -285,6 +292,23 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
     $c->response->body("");
 }
 
+sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) {
+    my ($self, $c) = @_;
+
+    $c->{stash}->{json}->{jobsetsTriggered} = [];
+
+    my $in = $c->request->{data};
+    my $url = $in->{repository}->{clone_url} or die;
+    $url =~ s/.git$//;
+    print STDERR "got push from Gitea repository $url\n";
+
+    triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search(
+        { 'project.enabled' => 1, 'me.enabled' => 1 },
+        { join => 'project'
+        , where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ]
+        });
+    $c->response->body("");
+}
 
 
 1;
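For context, the new `push_gitea` endpoint (apparently in `src/lib/Hydra/Controller/API.pm`) only reads `repository.clone_url` from the webhook body. A minimal sketch of exercising it, in the Catalyst::Test style this repository's own tests use; the hostname and the rest of the payload shape are illustrative assumptions:

    use HTTP::Request::Common qw(POST);
    use JSON::PP qw(encode_json);

    # Assumes request() was imported via Catalyst::Test->import('Hydra'),
    # as in this repo's test suite. Only repository.clone_url is read.
    my $res = request(POST '/api/push-gitea',
        'Content-Type' => 'application/json',
        Content        => encode_json({
            repository => { clone_url => 'https://git.example.com/owner/repo.git' },
        }),
    );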
@@ -10,11 +10,10 @@ use File::Basename;
 use File::LibMagic;
 use File::stat;
 use Data::Dump qw(dump);
-use Nix::Store;
-use Nix::Config;
 use List::SomeUtils qw(all);
 use Encode;
 use JSON::PP;
+use WWW::Form::UrlEncoded::PP qw();
 
 use feature 'state';
 
@@ -78,14 +77,16 @@ sub build_GET {
 
     $c->stash->{template} = 'build.tt';
     $c->stash->{isLocalStore} = isLocalStore();
+    # XXX: If the derivation is content-addressed then this will always return
+    # false because `$_->path` will be empty
     $c->stash->{available} =
         $c->stash->{isLocalStore}
-        ? all { isValidPath($_->path) } $build->buildoutputs->all
+        ? all { $_->path && $MACHINE_LOCAL_STORE->isValidPath($_->path) } $build->buildoutputs->all
         : 1;
-    $c->stash->{drvAvailable} = isValidPath $build->drvpath;
+    $c->stash->{drvAvailable} = $MACHINE_LOCAL_STORE->isValidPath($build->drvpath);
 
     if ($build->finished && $build->iscachedbuild) {
-        my $path = ($build->buildoutputs)[0]->path or die;
+        my $path = ($build->buildoutputs)[0]->path or undef;
         my $cachedBuildStep = findBuildStepByOutPath($self, $c, $path);
         if (defined $cachedBuildStep) {
             $c->stash->{cachedBuild} = $cachedBuildStep->build;
@@ -139,7 +140,7 @@ sub view_nixlog : Chained('buildChain') PathPart('nixlog') {
     $c->stash->{step} = $step;
 
     my $drvPath = $step->drvpath;
-    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
+    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
     showLog($c, $mode, $log_uri);
 }
 
@@ -148,7 +149,7 @@ sub view_log : Chained('buildChain') PathPart('log') {
     my ($self, $c, $mode) = @_;
 
     my $drvPath = $c->stash->{build}->drvpath;
-    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
+    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
     showLog($c, $mode, $log_uri);
 }
 
@@ -233,6 +234,9 @@ sub serveFile {
     }
 
     elsif ($ls->{type} eq "regular") {
+        # Have the hosted data considered its own origin to avoid being a giant
+        # XSS hole.
+        $c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');
 
         $c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
                                                       "store", "cat", "--store", getStoreUri(), "$path"]) };
@@ -306,7 +310,7 @@ sub output : Chained('buildChain') PathPart Args(1) {
     error($c, "This build is not finished yet.") unless $build->finished;
     my $output = $build->buildoutputs->find({name => $outputName});
     notFound($c, "This build has no output named ‘$outputName’") unless defined $output;
-    gone($c, "Output is no longer available.") unless isValidPath $output->path;
+    gone($c, "Output is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($output->path);
 
     $c->response->header('Content-Disposition', "attachment; filename=\"build-${\$build->id}-${\$outputName}.nar.bz2\"");
     $c->stash->{current_view} = 'NixNAR';
@@ -423,7 +427,7 @@ sub getDependencyGraph {
     };
     $$done{$path} = $node;
     my @refs;
-    foreach my $ref (queryReferences($path)) {
+    foreach my $ref ($MACHINE_LOCAL_STORE->queryReferences($path)) {
         next if $ref eq $path;
         next unless $runtime || $ref =~ /\.drv$/;
         getDependencyGraph($self, $c, $runtime, $done, $ref);
@@ -431,7 +435,7 @@ sub getDependencyGraph {
     }
     # Show in reverse topological order to flatten the graph.
     # Should probably do a proper BFS.
-    my @sorted = reverse topoSortPaths(@refs);
+    my @sorted = reverse $MACHINE_LOCAL_STORE->topoSortPaths(@refs);
     $node->{refs} = [map { $$done{$_} } @sorted];
 }
 
@@ -444,7 +448,7 @@ sub build_deps : Chained('buildChain') PathPart('build-deps') {
     my $build = $c->stash->{build};
     my $drvPath = $build->drvpath;
 
-    error($c, "Derivation no longer available.") unless isValidPath $drvPath;
+    error($c, "Derivation no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($drvPath);
 
     $c->stash->{buildTimeGraph} = getDependencyGraph($self, $c, 0, {}, $drvPath);
 
@@ -459,7 +463,7 @@ sub runtime_deps : Chained('buildChain') PathPart('runtime-deps') {
 
     requireLocalStore($c);
 
-    error($c, "Build outputs no longer available.") unless all { isValidPath($_) } @outPaths;
+    error($c, "Build outputs no longer available.") unless all { $MACHINE_LOCAL_STORE->isValidPath($_) } @outPaths;
 
     my $done = {};
     $c->stash->{runtimeGraph} = [ map { getDependencyGraph($self, $c, 1, $done, $_) } @outPaths ];
@@ -479,7 +483,7 @@ sub nix : Chained('buildChain') PathPart('nix') CaptureArgs(0) {
     if (isLocalStore) {
         foreach my $out ($build->buildoutputs) {
             notFound($c, "Path " . $out->path . " is no longer available.")
-                unless isValidPath($out->path);
+                unless $MACHINE_LOCAL_STORE->isValidPath($out->path);
         }
     }
 
@@ -16,8 +16,11 @@ use List::Util qw[min max];
 use List::SomeUtils qw{any};
 use Net::Prometheus;
 use Types::Standard qw/StrMatch/;
+use WWW::Form::UrlEncoded::PP qw();
 
 use constant NARINFO_REGEX => qr{^([a-z0-9]{32})\.narinfo$};
+# e.g.: https://hydra.example.com/realisations/sha256:a62128132508a3a32eef651d6467695944763602f226ac630543e947d9feb140!out.doi
+use constant REALISATIONS_REGEX => qr{^(sha256:[a-z0-9]{64}![a-z]+)\.doi$};
 
 # Put this controller at top-level.
 __PACKAGE__->config->{namespace} = '';
@@ -32,6 +35,7 @@ sub noLoginNeeded {
 
     return $whitelisted ||
         $c->request->path eq "api/push-github" ||
+        $c->request->path eq "api/push-gitea" ||
         $c->request->path eq "google-login" ||
         $c->request->path eq "github-redirect" ||
         $c->request->path eq "github-login" ||
@@ -77,7 +81,7 @@ sub begin :Private {
     $_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins};
 
     # XSRF protection: require POST requests to have the same origin.
-    if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") {
+    if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") {
         my $referer = $c->req->header('Referer');
         $referer //= $c->req->header('Origin');
         my $base = $c->req->base;
@@ -355,6 +359,33 @@ sub nix_cache_info :Path('nix-cache-info') :Args(0) {
 }
 
 
+sub realisations :Path('realisations') :Args(StrMatch[REALISATIONS_REGEX]) {
+    my ($self, $c, $realisation) = @_;
+
+    if (!isLocalStore) {
+        notFound($c, "There is no binary cache here.");
+    }
+
+    else {
+        my ($rawDrvOutput) = $realisation =~ REALISATIONS_REGEX;
+        my $rawRealisation = $MACHINE_LOCAL_STORE->queryRawRealisation($rawDrvOutput);
+
+        if (!$rawRealisation) {
+            $c->response->status(404);
+            $c->response->content_type('text/plain');
+            $c->stash->{plain}->{data} = "does not exist\n";
+            $c->forward('Hydra::View::Plain');
+            setCacheHeaders($c, 60 * 60);
+            return;
+        }
+
+        $c->response->content_type('text/plain');
+        $c->stash->{plain}->{data} = $rawRealisation;
+        $c->forward('Hydra::View::Plain');
+    }
+}
+
+
 sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
     my ($self, $c, $narinfo) = @_;
 
@@ -366,7 +397,7 @@ sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
     my ($hash) = $narinfo =~ NARINFO_REGEX;
 
     die("Hash length was not 32") if length($hash) != 32;
-    my $path = queryPathFromHashPart($hash);
+    my $path = $MACHINE_LOCAL_STORE->queryPathFromHashPart($hash);
 
     if (!$path) {
         $c->response->status(404);
@@ -524,7 +555,7 @@ sub log :Local :Args(1) {
     my $logPrefix = $c->config->{log_prefix};
 
     if (defined $logPrefix) {
-        $c->res->redirect($logPrefix . "log/" . basename($drvPath));
+        $c->res->redirect($logPrefix . "log/" . WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath)));
     } else {
        notFound($c, "The build log of $drvPath is not available.");
    }
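The new `realisations` endpoint serves raw realisations for content-addressed derivation outputs. A sketch of querying it, again in the Catalyst::Test style used by this repository's tests; the digest is the example one from the REALISATIONS_REGEX comment, and the expected responses are inferred from the handler above:

    use HTTP::Request::Common qw(GET);

    # The path must match REALISATIONS_REGEX, i.e. "<drv output id>.doi".
    my $res = request(GET '/realisations/sha256:a62128132508a3a32eef651d6467695944763602f226ac630543e947d9feb140!out.doi');
    # Expect the raw realisation as text/plain on a local store, a 404 otherwise.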
@@ -15,6 +15,7 @@ our @EXPORT = qw(
     forceLogin requireUser requireProjectOwner requireRestartPrivileges requireAdmin requirePost isAdmin isProjectOwner
     requireBumpPrivileges
     requireCancelBuildPrivileges
+    requireEvalJobsetPrivileges
     trim
     getLatestFinishedEval getFirstEval
     paramToList
@@ -186,6 +187,27 @@ sub isProjectOwner {
         defined $c->model('DB::ProjectMembers')->find({ project => $project, userName => $c->user->username }));
 }
 
+sub hasEvalJobsetRole {
+    my ($c) = @_;
+    return $c->user_exists && $c->check_user_roles("eval-jobset");
+}
+
+sub mayEvalJobset {
+    my ($c, $project) = @_;
+    return
+        $c->user_exists &&
+        (isAdmin($c) ||
+         hasEvalJobsetRole($c) ||
+         isProjectOwner($c, $project));
+}
+
+sub requireEvalJobsetPrivileges {
+    my ($c, $project) = @_;
+    requireUser($c);
+    accessDenied($c, "Only the project members, administrators, and accounts with eval-jobset privileges can perform this operation.")
+        unless mayEvalJobset($c, $project);
+}
+
 sub hasCancelBuildRole {
     my ($c) = @_;
     return $c->user_exists && $c->check_user_roles('cancel-build');
@@ -272,7 +294,7 @@ sub requireAdmin {
 
 sub requirePost {
     my ($c) = @_;
-    error($c, "Request must be POSTed.") if $c->request->method ne "POST";
+    error($c, "Request must be POSTed.", 405) if $c->request->method ne "POST";
 }
 
 
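`mayEvalJobset` grants access to admins, project owners, and holders of the new `eval-jobset` role. A sketch of granting that role to a user, following the `userroles` pattern that already appears in this repository's tests; `$db` as a Hydra::Schema handle is an assumption:

    # Hypothetical: give an existing user the eval-jobset role.
    my $user = $db->resultset('Users')->find({ username => 'alice' });
    $user->userroles->update_or_create({ role => 'eval-jobset' });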
@@ -40,8 +40,11 @@ our @EXPORT = qw(
     registerRoot
     restartBuilds
     run
+    $MACHINE_LOCAL_STORE
 );
 
+our $MACHINE_LOCAL_STORE = Nix::Store->new();
+
 
 sub getHydraHome {
     my $dir = $ENV{"HYDRA_HOME"} or die "The HYDRA_HOME directory does not exist!\n";
@@ -171,6 +174,9 @@ sub getDrvLogPath {
     for ($fn . $bucketed, $fn . $bucketed . ".bz2") {
         return $_ if -f $_;
     }
+    for ($fn . $bucketed, $fn . $bucketed . ".zst") {
+        return $_ if -f $_;
+    }
     return undef;
 }
 
@@ -187,6 +193,10 @@ sub findLog {
 
     return undef if scalar @outPaths == 0;
 
+    # Filter out any NULLs. Content-addressed derivations
+    # that haven't built yet or failed to build may have a NULL outPath.
+    @outPaths = grep {defined} @outPaths;
+
     my @steps = $c->model('DB::BuildSteps')->search(
         { path => { -in => [@outPaths] } },
         { select => ["drvpath"]
@@ -494,7 +504,7 @@ sub restartBuilds {
     $builds = $builds->search({ finished => 1 });
 
     foreach my $build ($builds->search({}, { columns => ["drvpath"] })) {
        next if !$MACHINE_LOCAL_STORE->isValidPath($build->drvpath);
        registerRoot $build->drvpath;
    }
 
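The hunks above and the many plugin hunks below replace the functional `Nix::Store` API with method calls on a shared handle now exported from `Hydra::Helper::Nix`. A minimal sketch of the migration at a call site (the `$path` variable is illustrative):

    use Hydra::Helper::Nix;    # now also exports $MACHINE_LOCAL_STORE, a Nix::Store instance

    # Before: isValidPath($path);           (functional Nix::Store API)
    # After:  method call on the shared machine-local store handle.
    if ($MACHINE_LOCAL_STORE->isValidPath($path)) {
        # ... $path exists in the local store
    }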
@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
 use File::Path;
 use Hydra::Helper::Exec;
 use Hydra::Helper::Nix;
-use Nix::Store;
 
 sub supportedInputTypes {
     my ($self, $inputTypes) = @_;
@@ -38,9 +37,9 @@ sub fetchInput {
     (my $cachedInput) = $self->{db}->resultset('CachedBazaarInputs')->search(
         {uri => $uri, revision => $revision});
 
-    addTempRoot($cachedInput->storepath) if defined $cachedInput;
+    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;
 
-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
     } else {
@@ -58,7 +57,7 @@ sub fetchInput {
     ($sha256, $storePath) = split ' ', $stdout;
 
     # FIXME: time window between nix-prefetch-bzr and addTempRoot.
-    addTempRoot($storePath);
+    $MACHINE_LOCAL_STORE->addTempRoot($storePath);
 
     $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedBazaarInputs')->create(
@@ -9,11 +9,24 @@ use Hydra::Helper::CatalystUtils;
 sub stepFinished {
     my ($self, $step, $logPath) = @_;
 
-    my $doCompress = $self->{config}->{'compress_build_logs'} // "1";
+    my $doCompress = $self->{config}->{'compress_build_logs'} // '1';
+    my $silent = $self->{config}->{'compress_build_logs_silent'} // '0';
+    my $compression = $self->{config}->{'compress_build_logs_compression'} // 'bzip2';
 
-    if ($doCompress eq "1" && -e $logPath) {
-        print STDERR "compressing ‘$logPath’...\n";
-        system("bzip2", "--force", $logPath);
+    if (not -e $logPath or $doCompress ne "1") {
+        return;
+    }
+
+    if ($silent ne '1') {
+        print STDERR "compressing '$logPath' with $compression...\n";
+    }
+
+    if ($compression eq 'bzip2') {
+        system('bzip2', '--force', $logPath);
+    } elsif ($compression eq 'zstd') {
+        system('zstd', '--rm', '--quiet', '-T0', $logPath);
+    } else {
+        print STDERR "unknown compression type '$compression'\n";
     }
 }
 
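The rewritten plugin (apparently `Hydra::Plugin::CompressLog`) now reads three config keys. A sketch of the resulting `$self->{config}` hash when zstd compression is selected; the key names come straight from the hunk above, the values shown are illustrative:

    # Illustrative: the config hash the plugin sees once hydra.conf is parsed.
    my $config = {
        'compress_build_logs'             => '1',     # enable compression (default '1')
        'compress_build_logs_silent'      => '0',     # '1' suppresses the per-file message
        'compress_build_logs_compression' => 'zstd',  # 'bzip2' (default) or 'zstd'
    };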
@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
 use File::Path;
 use Hydra::Helper::Exec;
 use Hydra::Helper::Nix;
-use Nix::Store;
 
 sub supportedInputTypes {
     my ($self, $inputTypes) = @_;
@@ -58,7 +57,7 @@ sub fetchInput {
         {uri => $uri, revision => $revision},
         {rows => 1});
 
-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
         $revision = $cachedInput->revision;
@@ -75,8 +74,8 @@ sub fetchInput {
     die "darcs changes --count failed" if $? != 0;
 
     system "rm", "-rf", "$tmpDir/export/_darcs";
-    $storePath = addToStore("$tmpDir/export", 1, "sha256");
-    $sha256 = queryPathHash($storePath);
+    $storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/export", 1, "sha256");
+    $sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath);
     $sha256 =~ s/sha256://;
 
     $self->{db}->txn_do(sub {
@@ -186,9 +186,9 @@ sub fetchInput {
         {uri => $uri, branch => $branch, revision => $revision, isdeepclone => defined($deepClone) ? 1 : 0},
         {rows => 1});
 
-    addTempRoot($cachedInput->storepath) if defined $cachedInput;
+    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;
 
-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
         $revision = $cachedInput->revision;
@@ -217,7 +217,7 @@ sub fetchInput {
     ($sha256, $storePath) = split ' ', grab(cmd => ["nix-prefetch-git", $clonePath, $revision], chomp => 1);
 
     # FIXME: time window between nix-prefetch-git and addTempRoot.
-    addTempRoot($storePath);
+    $MACHINE_LOCAL_STORE->addTempRoot($storePath);
 
     $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedGitInputs')->update_or_create(
@@ -88,10 +88,6 @@ sub buildQueued {
     common(@_, [], 0);
 }
 
-sub buildStarted {
-    common(@_, [], 1);
-}
-
 sub buildFinished {
     common(@_, 2);
 }
@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
 use File::Path;
 use Hydra::Helper::Nix;
 use Hydra::Helper::Exec;
-use Nix::Store;
 use Fcntl qw(:flock);
 
 sub supportedInputTypes {
@@ -68,9 +67,9 @@ sub fetchInput {
     (my $cachedInput) = $self->{db}->resultset('CachedHgInputs')->search(
         {uri => $uri, branch => $branch, revision => $revision});
 
-    addTempRoot($cachedInput->storepath) if defined $cachedInput;
+    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;
 
-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
     } else {
@@ -85,7 +84,7 @@ sub fetchInput {
     ($sha256, $storePath) = split ' ', $stdout;
 
     # FIXME: time window between nix-prefetch-hg and addTempRoot.
-    addTempRoot($storePath);
+    $MACHINE_LOCAL_STORE->addTempRoot($storePath);
 
     $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedHgInputs')->update_or_create(
@@ -5,7 +5,6 @@ use warnings;
 use parent 'Hydra::Plugin';
 use POSIX qw(strftime);
 use Hydra::Helper::Nix;
-use Nix::Store;
 
 sub supportedInputTypes {
     my ($self, $inputTypes) = @_;
@@ -30,7 +29,7 @@ sub fetchInput {
         {srcpath => $uri, lastseen => {">", $timestamp - $timeout}},
         {rows => 1, order_by => "lastseen DESC"});
 
-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
         $timestamp = $cachedInput->timestamp;
@@ -46,7 +45,7 @@ sub fetchInput {
     }
     chomp $storePath;
 
-    $sha256 = (queryPathInfo($storePath, 0))[1] or die;
+    $sha256 = ($MACHINE_LOCAL_STORE->queryPathInfo($storePath, 0))[1] or die;
 
     ($cachedInput) = $self->{db}->resultset('CachedPathInputs')->search(
         {srcpath => $uri, sha256hash => $sha256});
@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
 use Hydra::Helper::Exec;
 use Hydra::Helper::Nix;
 use IPC::Run;
-use Nix::Store;
 
 sub supportedInputTypes {
     my ($self, $inputTypes) = @_;
@@ -45,7 +44,7 @@ sub fetchInput {
     (my $cachedInput) = $self->{db}->resultset('CachedSubversionInputs')->search(
         {uri => $uri, revision => $revision});
 
-    addTempRoot($cachedInput->storepath) if defined $cachedInput;
+    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;
 
     if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
@@ -62,16 +61,16 @@ sub fetchInput {
     die "error checking out Subversion repo at `$uri':\n$stderr" if $res;
 
     if ($type eq "svn-checkout") {
-        $storePath = addToStore($wcPath, 1, "sha256");
+        $storePath = $MACHINE_LOCAL_STORE->addToStore($wcPath, 1, "sha256");
     } else {
         # Hm, if the Nix Perl bindings supported filters in
         # addToStore(), then we wouldn't need to make a copy here.
         my $tmpDir = File::Temp->newdir("hydra-svn-export.XXXXXX", CLEANUP => 1, TMPDIR => 1) or die;
         (system "svn", "export", $wcPath, "$tmpDir/source", "--quiet") == 0 or die "svn export failed";
-        $storePath = addToStore("$tmpDir/source", 1, "sha256");
+        $storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/source", 1, "sha256");
     }
 
-    $sha256 = queryPathHash($storePath); $sha256 =~ s/sha256://;
+    $sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath); $sha256 =~ s/sha256://;
 
     $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedSubversionInputs')->update_or_create(
@@ -49,7 +49,7 @@ __PACKAGE__->table("buildoutputs");
 =head2 path
 
   data_type: 'text'
-  is_nullable: 0
+  is_nullable: 1
 
 =cut
 
@@ -59,7 +59,7 @@ __PACKAGE__->add_columns(
   "name",
   { data_type => "text", is_nullable => 0 },
   "path",
-  { data_type => "text", is_nullable => 0 },
+  { data_type => "text", is_nullable => 1 },
 );
 
 =head1 PRIMARY KEY
@@ -94,8 +94,8 @@ __PACKAGE__->belongs_to(
 );
 
 
-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gU+kZ6A0ISKpaXGRGve8mg
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Jsabm3YTcI7YvCuNdKP5Ng
 
 my %hint = (
     columns => [
@@ -55,7 +55,7 @@ __PACKAGE__->table("buildstepoutputs");
 =head2 path
 
   data_type: 'text'
-  is_nullable: 0
+  is_nullable: 1
 
 =cut
 
@@ -67,7 +67,7 @@ __PACKAGE__->add_columns(
   "name",
   { data_type => "text", is_nullable => 0 },
   "path",
-  { data_type => "text", is_nullable => 0 },
+  { data_type => "text", is_nullable => 1 },
 );
 
 =head1 PRIMARY KEY
@@ -119,8 +119,8 @@ __PACKAGE__->belongs_to(
 );
 
 
-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gxp8rOjpRVen4YbIjomHTw
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Bad70CRTt7zb2GGuRoQ++Q
 
 
 # You can replace this text with custom code or comments, and it will be preserved on regeneration
@@ -8,6 +8,7 @@ use MIME::Base64;
 use Nix::Manifest;
 use Nix::Store;
 use Nix::Utils;
+use Hydra::Helper::Nix;
 use base qw/Catalyst::View/;
 
 sub process {
@@ -17,7 +18,7 @@ sub process {
 
     $c->response->content_type('text/x-nix-narinfo'); # !!! check MIME type
 
-    my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath, 1);
+    my ($deriver, $narHash, $time, $narSize, $refs) = $MACHINE_LOCAL_STORE->queryPathInfo($storePath, 1);
 
     my $info;
     $info .= "StorePath: $storePath\n";
@@ -28,8 +29,8 @@ sub process {
     $info .= "References: " . join(" ", map { basename $_ } @{$refs}) . "\n";
     if (defined $deriver) {
         $info .= "Deriver: " . basename $deriver . "\n";
-        if (isValidPath($deriver)) {
-            my $drv = derivationFromPath($deriver);
+        if ($MACHINE_LOCAL_STORE->isValidPath($deriver)) {
+            my $drv = $MACHINE_LOCAL_STORE->derivationFromPath($deriver);
             $info .= "System: $drv->{platform}\n";
         }
     }
@@ -16,7 +16,10 @@ sub process {
 
     my $tail = int($c->stash->{tail} // "0");
 
-    if ($logPath =~ /\.bz2$/) {
+    if ($logPath =~ /\.zst$/) {
+        my $doTail = $tail ? "| tail -n '$tail'" : "";
+        open($fh, "-|", "zstd -dc < '$logPath' $doTail") or die;
+    } elsif ($logPath =~ /\.bz2$/) {
         my $doTail = $tail ? "| tail -n '$tail'" : "";
         open($fh, "-|", "bzip2 -dc < '$logPath' $doTail") or die;
     } else {
@@ -2,6 +2,7 @@
 
 #include <pqxx/pqxx>
 
+#include "environment-variables.hh"
 #include "util.hh"
 
 
@@ -2,6 +2,7 @@
 
 #include <map>
 
+#include "file-system.hh"
 #include "util.hh"
 
 struct HydraConfig
@@ -33,7 +33,7 @@
 <div id="hydra-signin" class="modal hide fade" tabindex="-1" role="dialog" aria-hidden="true">
   <div class="modal-dialog" role="document">
     <div class="modal-content">
-      <form>
+      <form id="signin-form">
         <div class="modal-body">
           <div class="form-group">
             <label for="username" class="col-form-label">User name</label>
@@ -45,7 +45,7 @@
           </div>
         </div>
         <div class="modal-footer">
-          <button id="do-signin" type="button" class="btn btn-primary">Sign in</button>
+          <button type="submit" class="btn btn-primary">Sign in</button>
           <button type="button" class="btn btn-secondary" data-dismiss="modal">Cancel</button>
         </div>
       </form>
@@ -57,10 +57,11 @@
 
   function finishSignOut() { }
 
-  $("#do-signin").click(function() {
+  $("#signin-form").submit(function(e) {
+    e.preventDefault();
     requestJSON({
       url: "[% c.uri_for('/login') %]",
-      data: $(this).parents("form").serialize(),
+      data: $(this).serialize(),
       type: 'POST',
       success: function(data) {
         window.location.reload();
@@ -374,7 +374,7 @@ BLOCK renderInputDiff; %]
       [% ELSIF bi1.uri == bi2.uri && bi1.revision != bi2.revision %]
         [% IF bi1.type == "git" %]
           <tr><td>
-            <b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 8) _ ' to ' _ bi2.revision.substr(0, 8)) %]</tt>
+            <b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 12) _ ' to ' _ bi2.revision.substr(0, 12)) %]</tt>
           </td></tr>
         [% ELSE %]
           <tr><td>
@@ -205,6 +205,7 @@
         if (!c) return;
         requestJSON({
           url: "[% HTML.escape(c.uri_for('/api/push', { jobsets = project.name _ ':' _ jobset.name, force = "1" })) %]",
+          type: 'POST',
           success: function(data) {
             bootbox.alert("The jobset has been scheduled for evaluation.");
           }
@@ -134,7 +134,7 @@
       [% WRAPPER makeSubMenu title="Sign in" id="sign-in-menu" align="right" %]
         [% IF c.config.enable_google_login %]
          <script src="https://accounts.google.com/gsi/client" async defer></script>
-          <div id="g_id_onload" data-client_id="[% c.config.google_client_id %]" data-callback="onGoogleSignIn">
+          <div id="g_id_onload" data-client_id="[% c.config.google_client_id %]" data-auto_prompt="false" data-callback="onGoogleSignIn">
          </div>
          <div class="g_id_signin" data-type="standard"></div>
          <div class="dropdown-divider"></div>
@@ -91,6 +91,7 @@
       [% INCLUDE roleoption mutable=mutable role="restart-jobs" %]
       [% INCLUDE roleoption mutable=mutable role="bump-to-front" %]
       [% INCLUDE roleoption mutable=mutable role="cancel-build" %]
+      [% INCLUDE roleoption mutable=mutable role="eval-jobset" %]
     </p>
   </div>
 </div>
@@ -85,14 +85,14 @@ sub attrsToSQL {
 # Fetch a store path from 'eval_substituter' if not already present.
 sub getPath {
     my ($path) = @_;
-    return 1 if isValidPath($path);
+    return 1 if $MACHINE_LOCAL_STORE->isValidPath($path);
 
     my $substituter = $config->{eval_substituter};
 
     system("nix", "--experimental-features", "nix-command", "copy", "--from", $substituter, "--", $path)
         if defined $substituter;
 
-    return isValidPath($path);
+    return $MACHINE_LOCAL_STORE->isValidPath($path);
 }
 
 
@@ -143,7 +143,7 @@ sub fetchInputBuild {
         , version => $version
         , outputName => $mainOutput->name
         };
-    if (isValidPath($prevBuild->drvpath)) {
+    if ($MACHINE_LOCAL_STORE->isValidPath($prevBuild->drvpath)) {
         $result->{drvPath} = $prevBuild->drvpath;
     }
 
@@ -233,7 +233,7 @@ sub fetchInputEval {
         my $out = $build->buildoutputs->find({ name => "out" });
         next unless defined $out;
         # FIXME: Should we fail if the path is not valid?
-        next unless isValidPath($out->path);
+        next unless $MACHINE_LOCAL_STORE->isValidPath($out->path);
         $jobs->{$build->get_column('job')} = $out->path;
     }
 
@@ -438,13 +438,17 @@ sub checkBuild {
     # new build to be scheduled if the meta.maintainers field is
     # changed?
     if (defined $prevEval) {
+        my $pathOrDrvConstraint = defined $firstOutputPath
+            ? { path => $firstOutputPath }
+            : { drvPath => $drvPath };
+
         my ($prevBuild) = $prevEval->builds->search(
             # The "project" and "jobset" constraints are
             # semantically unnecessary (because they're implied by
             # the eval), but they give a factor 1000 speedup on
             # the Nixpkgs jobset with PostgreSQL.
             { jobset_id => $jobset->get_column('id'), job => $jobName,
-              name => $firstOutputName, path => $firstOutputPath },
+              name => $firstOutputName, %$pathOrDrvConstraint },
             { rows => 1, columns => ['id', 'finished'], join => ['buildoutputs'] });
         if (defined $prevBuild) {
             #print STDERR "  already scheduled/built as build ", $prevBuild->id, "\n";
@@ -5,7 +5,6 @@ use warnings;
 use File::Path;
 use File::stat;
 use File::Basename;
-use Nix::Store;
 use Hydra::Config;
 use Hydra::Schema;
 use Hydra::Helper::Nix;
@@ -47,7 +46,7 @@ sub keepBuild {
         $build->finished && ($build->buildstatus == 0 || $build->buildstatus == 6))
     {
         foreach my $path (split / /, $build->get_column('outpaths')) {
-            if (isValidPath($path)) {
+            if ($MACHINE_LOCAL_STORE->isValidPath($path)) {
                 addRoot $path;
             } else {
                 print STDERR "  warning: output ", $path, " has disappeared\n" if $build->finished;
@@ -55,7 +54,7 @@ sub keepBuild {
         }
     }
     if (!$build->finished || ($keepFailedDrvs && $build->buildstatus != 0)) {
-        if (isValidPath($build->drvpath)) {
+        if ($MACHINE_LOCAL_STORE->isValidPath($build->drvpath)) {
            addRoot $build->drvpath;
        } else {
            print STDERR "  warning: derivation ", $build->drvpath, " has disappeared\n";
@@ -247,7 +247,7 @@ create trigger BuildBumped after update on Builds for each row
 create table BuildOutputs (
     build integer not null,
     name text not null,
-    path text not null,
+    path text,
     primary key (build, name),
     foreign key (build) references Builds(id) on delete cascade
 );
@@ -303,7 +303,7 @@ create table BuildStepOutputs (
     build integer not null,
     stepnr integer not null,
     name text not null,
-    path text not null,
+    path text,
     primary key (build, stepnr, name),
     foreign key (build) references Builds(id) on delete cascade,
     foreign key (build, stepnr) references BuildSteps(build, stepnr) on delete cascade
src/sql/upgrade-84.sql (new file, 4 lines)
@@ -0,0 +1,4 @@
+-- CA derivations do not have statically known output paths. The values
+-- are only filled in after the build runs.
+ALTER TABLE BuildStepOutputs ALTER COLUMN path DROP NOT NULL;
+ALTER TABLE BuildOutputs ALTER COLUMN path DROP NOT NULL;
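Once `path` is nullable, Perl consumers have to tolerate NULL output paths from content-addressed derivations that have not been built yet; the `findLog` hunk earlier adds exactly this guard. A minimal sketch of the same filter at a hypothetical call site (`$build` is assumed to be a Builds row):

    # Keep only outputs whose path is already known (non-NULL), mirroring findLog.
    my @outPaths = grep { defined } map { $_->path } $build->buildoutputs->all;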
@@ -57,6 +57,7 @@ subtest "getLDAPConfig" => sub {
                 "hydra_cancel-build" => [ "cancel-build" ],
                 "hydra_create-projects" => [ "create-projects" ],
                 "hydra_restart-jobs" => [ "restart-jobs" ],
+                "hydra_eval-jobset" => [ "eval-jobset" ],
             }
         },
         "The empty file and set env var make legacy mode active."
@@ -177,6 +178,7 @@ subtest "get_legacy_ldap_config" => sub {
                 "hydra_cancel-build" => [ "cancel-build" ],
                 "hydra_create-projects" => [ "create-projects" ],
                 "hydra_restart-jobs" => [ "restart-jobs" ],
+                "hydra_eval-jobset" => [ "eval-jobset" ],
             }
         },
         "Legacy, default role maps are applied."
@@ -22,9 +22,24 @@ sub is_json {
 }
 
 my $ctx = test_context();
 
 Catalyst::Test->import('Hydra');
 
+# Create a user to log in to
+my $user = $ctx->db->resultset('Users')->create({ username => 'alice', emailaddress => 'alice@example.com', password => '!' });
+$user->setPassword('foobar');
+$user->userroles->update_or_create({ role => 'admin' });
+
+# Login and save cookie for future requests
+my $req = request(POST '/login',
+    Referer => 'http://localhost/',
+    Content => {
+        username => 'alice',
+        password => 'foobar'
+    }
+);
+is($req->code, 302, "The login redirects");
+my $cookie = $req->header("set-cookie");
 
 my $finishedBuilds = $ctx->makeAndEvaluateJobset(
     expression => "one-job.nix",
     build => 1
@@ -109,7 +124,10 @@ subtest "/api/push" => sub {
     my $jobsetName = $jobset->name;
     is($jobset->forceeval, undef, "The existing jobset is not set to be forced to eval");
 
-    my $response = request(GET "/api/push?jobsets=$projectName:$jobsetName&force=1");
+    my $response = request(POST "/api/push?jobsets=$projectName:$jobsetName&force=1",
+        Cookie => $cookie,
+        Referer => 'http://localhost/',
+    );
     ok($response->is_success, "The API enpdoint for triggering jobsets returns 200.");
 
     my $data = is_json($response);
@@ -128,7 +146,10 @@ subtest "/api/push" => sub {
 
     print STDERR $repo;
 
-    my $response = request(GET "/api/push?repos=$repo&force=1");
+    my $response = request(POST "/api/push?repos=$repo&force=1",
+        Cookie => $cookie,
+        Referer => 'http://localhost/',
+    );
     ok($response->is_success, "The API enpdoint for triggering jobsets returns 200.");
 
     my $data = is_json($response);
@@ -24,6 +24,7 @@ $ldap->add_group("hydra_create-projects", $users->{"many_roles"}->{"username"});
 $ldap->add_group("hydra_restart-jobs", $users->{"many_roles"}->{"username"});
 $ldap->add_group("hydra_bump-to-front", $users->{"many_roles"}->{"username"});
 $ldap->add_group("hydra_cancel-build", $users->{"many_roles"}->{"username"});
+$ldap->add_group("hydra_eval-jobset", $users->{"many_roles"}->{"username"});
 
 my $hydra_ldap_config = "${\$ldap->tmpdir()}/hydra_ldap_config.yaml";
 LDAPContext::write_file($hydra_ldap_config, <<YAML);
@@ -68,7 +69,7 @@ subtest "Valid login attempts" => sub {
         unrelated => [],
         admin => ["admin"],
         not_admin => [],
-        many_roles => [ "create-projects", "restart-jobs", "bump-to-front", "cancel-build" ],
+        many_roles => [ "create-projects", "restart-jobs", "bump-to-front", "cancel-build", "eval-jobset" ],
     );
     for my $username (keys %users_to_roles) {
         my $user = $users->{$username};
|||||||
@@ -24,6 +24,7 @@ $ldap->add_group("hydra_create-projects", $users->{"many_roles"}->{"username"});
 $ldap->add_group("hydra_restart-jobs", $users->{"many_roles"}->{"username"});
 $ldap->add_group("hydra_bump-to-front", $users->{"many_roles"}->{"username"});
 $ldap->add_group("hydra_cancel-build", $users->{"many_roles"}->{"username"});
+$ldap->add_group("hydra_eval-jobset", $users->{"many_roles"}->{"username"});
 
 
 my $ctx = test_context(
@@ -76,10 +77,12 @@ my $ctx = test_context(
             hydra_cancel-build = cancel-build
             hydra_bump-to-front = bump-to-front
             hydra_restart-jobs = restart-jobs
+            hydra_eval-jobset = eval-jobset
 
             hydra_one_group_many_roles = create-projects
             hydra_one_group_many_roles = cancel-build
             hydra_one_group_many_roles = bump-to-front
+            hydra_one_group_many-roles = eval-jobset
         </role_mapping>
     </ldap>
 CFG
@@ -92,7 +95,7 @@ subtest "Valid login attempts" => sub {
     unrelated => [],
     admin => ["admin"],
     not_admin => [],
-    many_roles => [ "create-projects", "restart-jobs", "bump-to-front", "cancel-build" ],
+    many_roles => [ "create-projects", "restart-jobs", "bump-to-front", "cancel-build", "eval-jobset" ],
     many_roles_one_group => [ "create-projects", "bump-to-front", "cancel-build" ],
 );
 for my $username (keys %users_to_roles) {
t/content-addressed/basic.t (new file, 63 lines)
@@ -0,0 +1,63 @@
+use feature 'unicode_strings';
+use strict;
+use warnings;
+use Setup;
+
+my %ctx = test_init(
+    nix_config => qq|
+    experimental-features = ca-derivations
+    |,
+);
+
+require Hydra::Schema;
+require Hydra::Model::DB;
+
+use JSON::MaybeXS;
+
+use HTTP::Request::Common;
+use Test2::V0;
+require Catalyst::Test;
+Catalyst::Test->import('Hydra');
+
+my $db = Hydra::Model::DB->new;
+hydra_setup($db);
+
+my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"});
+
+my $jobset = createBaseJobset("content-addressed", "content-addressed.nix", $ctx{jobsdir});
+
+ok(evalSucceeds($jobset), "Evaluating jobs/content-addressed.nix should exit with return code 0");
+is(nrQueuedBuildsForJobset($jobset), 6, "Evaluating jobs/content-addressed.nix should result in 6 builds");
+
+for my $build (queuedBuildsForJobset($jobset)) {
+    ok(runBuild($build), "Build '".$build->job."' from jobs/content-addressed.nix should exit with code 0");
+    my $newbuild = $db->resultset('Builds')->find($build->id);
+    is($newbuild->finished, 1, "Build '".$build->job."' from jobs/content-addressed.nix should be finished.");
+    my $expected = $build->job eq "fails" ? 1 : $build->job =~ /with_failed/ ? 6 : $build->job =~ /FailingCA/ ? 2 : 0;
+    is($newbuild->buildstatus, $expected, "Build '".$build->job."' from jobs/content-addressed.nix should have buildstatus $expected.");
+
+    my $response = request("/build/".$build->id);
+    ok($response->is_success, "The 'build' page for build '".$build->job."' should load properly");
+
+    if ($newbuild->buildstatus == 0) {
+        my $buildOutputs = $newbuild->buildoutputs;
+        for my $output ($newbuild->buildoutputs) {
+            # XXX: This hardcodes /nix/store/.
+            # It's fine because in practice the nix store for the tests will be of
+            # the form `/some/thing/nix/store/`, but it would be cleaner if there
+            # was a way to query Nix for its store dir?
+            like(
+                $output->path, qr|/nix/store/|,
+                "Output '".$output->name."' of build '".$build->job."' should be a valid store path"
+            );
+        }
+    }
+
+}
+
+# XXX: deststoredir is undefined: Use of uninitialized value $ctx{"deststoredir"} in concatenation (.) or string at t/content-addressed/basic.t line 58.
+# XXX: This test seems to not do what it seems to be doing. See documentation: https://metacpan.org/pod/Test2::V0#isnt($got,-$do_not_want,-$name)
+isnt(<$ctx{deststoredir}/realisations/*>, "", "The destination store should have the realisations of the built derivations registered");
+
+done_testing;
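The `XXX: This hardcodes /nix/store/` comment above asks whether the store directory could be queried instead of hardcoded. A minimal sketch of one way a test could do that, assuming `nix-instantiate` is on `$PATH` (this helper is hypothetical and not part of the diff):

    use strict;
    use warnings;

    # Ask Nix for its configured store directory; fall back to the
    # conventional default when the command is unavailable.
    sub nix_store_dir {
        my $out = `nix-instantiate --eval --expr builtins.storeDir 2>/dev/null` // "";
        chomp $out;
        $out =~ s/^"(.*)"$/$1/;    # the eval result is printed as a quoted string
        return $out ne "" ? $out : "/nix/store";
    }

Inside the output loop, the check could then read `like($output->path, qr|^\Q${\ nix_store_dir()}\E/|, ...)`, which would keep working even with a relocated store.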
t/content-addressed/without-experimental-feature.t (new file, 28 lines)
@@ -0,0 +1,28 @@
+use feature 'unicode_strings';
+use strict;
+use warnings;
+use Setup;
+
+my %ctx = test_init();
+
+require Hydra::Schema;
+require Hydra::Model::DB;
+
+use JSON::MaybeXS;
+
+use HTTP::Request::Common;
+use Test2::V0;
+require Catalyst::Test;
+Catalyst::Test->import('Hydra');
+
+my $db = Hydra::Model::DB->new;
+hydra_setup($db);
+
+my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"});
+
+my $jobset = createBaseJobset("content-addressed", "content-addressed.nix", $ctx{jobsdir});
+
+ok(evalSucceeds($jobset), "Evaluating jobs/content-addressed.nix without the experimental feature should exit with return code 0");
+is(nrQueuedBuildsForJobset($jobset), 0, "Evaluating jobs/content-addressed.nix without the experimental Nix feature should result in 0 builds");
+
+done_testing;
@@ -31,6 +31,10 @@ if ($sd_res != 0) {
     skip_all("`systemd-run` returned non-zero when executing `true` (expected 0)");
 }
 
+# XXX(Mindavi): We should think about how to fix this.
+# Note that it was always skipped on ofborg/h.n.o (nixos hydra) since systemd-run is not present in the ambient environment there.
+skip_all("Always fails, an error about 'oom' being a string is logged and the process never OOMs. Needs a way to use more memory.");
+
 my $ctx = test_context();
 
 # Contain the memory usage to 25 MegaBytes using `systemd-run`
@@ -6,4 +6,9 @@ rec {
     system = builtins.currentSystem;
     PATH = path;
   } // args);
+  mkContentAddressedDerivation = args: mkDerivation ({
+    __contentAddressed = true;
+    outputHashMode = "recursive";
+    outputHashAlgo = "sha256";
+  } // args);
 }
t/jobs/content-addressed.nix (new file, 42 lines)
@@ -0,0 +1,42 @@
+let cfg = import ./config.nix; in
+rec {
+  empty_dir =
+    cfg.mkContentAddressedDerivation {
+      name = "empty-dir";
+      builder = ./empty-dir-builder.sh;
+    };
+
+  fails =
+    cfg.mkContentAddressedDerivation {
+      name = "fails";
+      builder = ./fail.sh;
+    };
+
+  succeed_with_failed =
+    cfg.mkContentAddressedDerivation {
+      name = "succeed-with-failed";
+      builder = ./succeed-with-failed.sh;
+    };
+
+  caDependingOnCA =
+    cfg.mkContentAddressedDerivation {
+      name = "ca-depending-on-ca";
+      builder = ./dir-with-file-builder.sh;
+      FOO = empty_dir;
+    };
+
+  caDependingOnFailingCA =
+    cfg.mkContentAddressedDerivation {
+      name = "ca-depending-on-failing-ca";
+      builder = ./dir-with-file-builder.sh;
+      FOO = fails;
+    };
+
+  nonCaDependingOnCA =
+    cfg.mkDerivation {
+      name = "non-ca-depending-on-ca";
+      builder = ./dir-with-file-builder.sh;
+      FOO = empty_dir;
+    };
+}
t/jobs/dir-with-file-builder.sh (new executable file, 4 lines)
@@ -0,0 +1,4 @@
+#! /bin/sh
+
+mkdir $out
+echo foo > $out/a-file
@@ -1,6 +1,3 @@
 #! /bin/sh
 
-# Workaround for https://github.com/NixOS/nix/pull/6051
-echo "some output"
-
 mkdir $out
@@ -39,7 +39,11 @@ use Hydra::Helper::Exec;
 sub new {
     my ($class, %opts) = @_;
 
-    my $dir = File::Temp->newdir();
+    my $deststoredir;
+
+    # Cleanup will be managed by yath. By default it will be cleaned
+    # up, but can be kept to aid in debugging test failures.
+    my $dir = File::Temp->newdir(CLEANUP => 0);
 
     $ENV{'HYDRA_DATA'} = "$dir/hydra-data";
     mkdir $ENV{'HYDRA_DATA'};
@@ -53,6 +57,7 @@ sub new {
     my $hydra_config = $opts{'hydra_config'} || "";
     $hydra_config = "queue_runner_metrics_address = 127.0.0.1:0\n" . $hydra_config;
     if ($opts{'use_external_destination_store'} // 1) {
+        $deststoredir = "$dir/nix/dest-store";
         $hydra_config = "store_uri = file://$dir/nix/dest-store\n" . $hydra_config;
     }
 
@@ -79,14 +84,15 @@ sub new {
         nix_state_dir => $nix_state_dir,
         nix_log_dir => $nix_log_dir,
         testdir => abs_path(dirname(__FILE__) . "/.."),
-        jobsdir => abs_path(dirname(__FILE__) . "/../jobs")
+        jobsdir => abs_path(dirname(__FILE__) . "/../jobs"),
+        deststoredir => $deststoredir,
     }, $class;
 
     if ($opts{'before_init'}) {
         $opts{'before_init'}->($self);
     }
 
-    expectOkay(5, ("hydra-init"));
+    expectOkay(30, ("hydra-init"));
 
     return $self;
 }
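With `deststoredir` now carried on the test context, a test could assert against the external destination store without rebuilding the path by hand. A minimal sketch, not part of the diff: it assumes the field is read straight off the blessed hashref, and it rewrites the `XXX`'d `isnt(<glob>, "", ...)` check from t/content-addressed/basic.t into a form that actually fails on an empty result:

    use strict;
    use warnings;
    use Setup;        # the test harness module seen in the diffs above
    use Test2::V0;

    my $ctx = test_context();

    # Assumption: the new field is accessed directly on the hashref.
    my @realisations = glob($ctx->{deststoredir} . "/realisations/*");
    ok(scalar @realisations > 0,
        "the destination store has realisations registered for the built derivations");

    done_testing;

Using `ok(scalar @realisations > 0, ...)` sidesteps the `isnt($got, $do_not_want)` pitfall the Test2::V0 documentation link in the diff points at.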
@@ -70,7 +70,7 @@ sub add_user {
     my $email = $opts{'email'} // "$name\@example";
     my $password = $opts{'password'} // rand_chars();
 
-    my ($res, $stdout, $stderr) = captureStdoutStderr(1, ("slappasswd", "-s", $password));
+    my ($res, $stdout, $stderr) = captureStdoutStderr(5, ("slappasswd", "-s", $password));
     if ($res) {
         die "Failed to execute slappasswd ($res): $stderr, $stdout";
     }
@@ -178,7 +178,7 @@ sub start {
 sub validateConfig {
     my ($self) = @_;
 
-    expectOkay(1, ("slaptest", "-u", "-F", $self->{"_slapd_dir"}));
+    expectOkay(5, ("slaptest", "-u", "-F", $self->{"_slapd_dir"}));
 }
 
 sub _spawn {
@@ -218,7 +218,7 @@ sub load_ldif {
 
     my $path = "${\$self->{'_tmpdir'}}/load.ldif";
     write_file($path, $content);
-    expectOkay(1, ("slapadd", "-F", $self->{"_slapd_dir"}, "-b", $suffix, "-l", $path));
+    expectOkay(5, ("slapadd", "-F", $self->{"_slapd_dir"}, "-b", $suffix, "-l", $path));
     $self->validateConfig();
 }
 
@@ -39,7 +39,7 @@ subtest "Building, caching, and then garbage collecting the underlying job" => s
 
     ok(unlink(Hydra::Helper::Nix::gcRootFor($path)), "Unlinking the GC root for underlying Dependency succeeds");
 
-    (my $ret, my $stdout, my $stderr) = captureStdoutStderr(5, "nix-store", "--delete", $path);
+    (my $ret, my $stdout, my $stderr) = captureStdoutStderr(15, "nix-store", "--delete", $path);
     is($ret, 0, "Deleting the underlying dependency should succeed");
 };
 
@@ -8,7 +8,7 @@ my $binarycachedir = File::Temp->newdir();
 
 my $ctx = test_context(
     nix_config => qq|
-    experimental-features = nix-command
+    experimental-features = nix-command ca-derivations
     substituters = file://${binarycachedir}?trusted=1
     |,
     hydra_config => q|
@@ -3,7 +3,6 @@ use warnings;
 use File::Basename;
 use Hydra::Model::DB;
 use Hydra::Helper::Nix;
-use Nix::Store;
 use Cwd;
 
 my $db = Hydra::Model::DB->new;
@@ -9,7 +9,7 @@ my $db = $ctx->db();
 
 subtest "Handling password and password hash creation" => sub {
     subtest "Creating a user with a plain text password (insecure) stores the password securely" => sub {
-        my ($res, $stdout, $stderr) = captureStdoutStderr(5, ("hydra-create-user", "plain-text-user", "--password", "foobar"));
+        my ($res, $stdout, $stderr) = captureStdoutStderr(15, ("hydra-create-user", "plain-text-user", "--password", "foobar"));
         is($res, 0, "hydra-create-user should exit zero");
         like($stderr, qr/Submitting plaintext passwords as arguments is deprecated and will be removed/, "Submitting a plain text password is deprecated.");
 
@@ -23,7 +23,7 @@ subtest "Handling password and password hash creation" => sub {
     };
 
     subtest "Creating a user with a sha1 password (still insecure) stores the password as a hashed sha1" => sub {
-        my ($res, $stdout, $stderr) = captureStdoutStderr(5, ("hydra-create-user", "old-password-hash-user", "--password-hash", "8843d7f92416211de9ebb963ff4ce28125932878"));
+        my ($res, $stdout, $stderr) = captureStdoutStderr(15, ("hydra-create-user", "old-password-hash-user", "--password-hash", "8843d7f92416211de9ebb963ff4ce28125932878"));
         is($res, 0, "hydra-create-user should exit zero");
 
         my $user = $db->resultset('Users')->find({ username => "old-password-hash-user" });
@@ -36,7 +36,7 @@ subtest "Handling password and password hash creation" => sub {
     };
 
     subtest "Creating a user with an argon2 password stores the password as given" => sub {
-        my ($res, $stdout, $stderr) = captureStdoutStderr(5, ("hydra-create-user", "argon2-hash-user", "--password-hash", '$argon2id$v=19$m=262144,t=3,p=1$tMnV5paYjmIrUIb6hylaNA$M8/e0i3NGrjhOliVLa5LqQ'));
+        my ($res, $stdout, $stderr) = captureStdoutStderr(15, ("hydra-create-user", "argon2-hash-user", "--password-hash", '$argon2id$v=19$m=262144,t=3,p=1$tMnV5paYjmIrUIb6hylaNA$M8/e0i3NGrjhOliVLa5LqQ'));
         is($res, 0, "hydra-create-user should exit zero");
 
         my $user = $db->resultset('Users')->find({ username => "argon2-hash-user" });
@@ -50,7 +50,7 @@ subtest "Handling password and password hash creation" => sub {
 
     subtest "Creating a user by prompting for the password" => sub {
         subtest "with the same password twice" => sub {
-            my ($res, $stdout, $stderr) = captureStdoutStderrWithStdin(5, ["hydra-create-user", "prompted-pass-user", "--password-prompt"], "my-password\nmy-password\n");
+            my ($res, $stdout, $stderr) = captureStdoutStderrWithStdin(15, ["hydra-create-user", "prompted-pass-user", "--password-prompt"], "my-password\nmy-password\n");
             is($res, 0, "hydra-create-user should exit zero");
 
             my $user = $db->resultset('Users')->find({ username => "prompted-pass-user" });
@@ -62,7 +62,7 @@ subtest "Handling password and password hash creation" => sub {
         };
 
         subtest "With mismatched password confirmation" => sub {
-            my ($res, $stdout, $stderr) = captureStdoutStderrWithStdin(5, ["hydra-create-user", "prompted-pass-user", "--password-prompt"], "my-password\nnot-my-password\n");
+            my ($res, $stdout, $stderr) = captureStdoutStderrWithStdin(15, ["hydra-create-user", "prompted-pass-user", "--password-prompt"], "my-password\nnot-my-password\n");
             isnt($res, 0, "hydra-create-user should exit non-zero");
         };
     };
@@ -76,7 +76,7 @@ subtest "Handling password and password hash creation" => sub {
     );
 
     for my $case (@cases) {
-        my ($res, $stdout, $stderr) = captureStdoutStderr(5, (
+        my ($res, $stdout, $stderr) = captureStdoutStderr(15, (
             "hydra-create-user", "bogus-password-options", @{$case}));
         like($stderr, qr/please specify only one of --password-prompt or --password-hash/, "We get an error about specifying the password");
         isnt($res, 0, "hydra-create-user should exit non-zero with conflicting " . join(" ", @{$case}));
@@ -84,7 +84,7 @@ subtest "Handling password and password hash creation" => sub {
     };
 
     subtest "A password is not required for creating a Google-based account" => sub {
-        my ($res, $stdout, $stderr) = captureStdoutStderr(5, (
+        my ($res, $stdout, $stderr) = captureStdoutStderr(15, (
             "hydra-create-user", "google-account", "--type", "google"));
         is($res, 0, "hydra-create-user should exit zero");
     };
@@ -28,7 +28,7 @@ subtest "hydra-init upgrades user's password hashes from sha1 to sha1 inside Arg
     $janet->setPassword("foobar");
 
     is($alice->password, "8843d7f92416211de9ebb963ff4ce28125932878", "Alice's sha1 is stored in the database");
-    my ($res, $stdout, $stderr) = captureStdoutStderr(5, ("hydra-init"));
+    my ($res, $stdout, $stderr) = captureStdoutStderr(30, ("hydra-init"));
     if ($res != 0) {
         is($stdout, "");
         is($stderr, "");
@@ -55,7 +55,7 @@ subtest "hydra-init upgrades user's password hashes from sha1 to sha1 inside Arg
 };
 
 subtest "Running hydra-init doesn't break Alice or Janet's passwords" => sub {
-    my ($res, $stdout, $stderr) = captureStdoutStderr(5, ("hydra-init"));
+    my ($res, $stdout, $stderr) = captureStdoutStderr(30, ("hydra-init"));
     is($res, 0, "hydra-init should exit zero");
 
     my $updatedAlice = $db->resultset('Users')->find({ username => "alice" });
@@ -21,7 +21,7 @@ if (defined($ENV{"NIX_BUILD_CORES"})
     print STDERR "test.pl: Defaulting \$YATH_JOB_COUNT to \$NIX_BUILD_CORES (${\$ENV{'NIX_BUILD_CORES'}})\n";
 }
 
-system($^X, find_yath(), '-D', 'test', '--default-search' => './', @ARGV);
+system($^X, find_yath(), '-D', 'test', '--qvf', '--event-timeout', 240, '--default-search' => './', @ARGV);
 my $exit = $?;
 
 # This makes sure it works with prove.