Compare commits: lazy-trees...like-sub
210 commits
| SHA1 |
|---|
| 250780aaf2 |
| 7de7122479 |
| ada51d70fc |
| d7986226f0 |
| b3e0d9a8b7 |
| 5728011da1 |
| 559376e907 |
| 998df1657e |
| f99cdaf5fe |
| 3bf00e31c0 |
| e149da7b9b |
| e81c36ac92 |
| 743795b2b0 |
| 50378aef22 |
| 92155f9a07 |
| 29ce5c603c |
| 4bd687e3e6 |
| 1b8154e67f |
| b72528be50 |
| 8b48579593 |
| ef7bf1e67b |
| ab1f64aa4d |
| 3f913a771d |
| 71986632ce |
| 1665aed5e3 |
| b676b08fac |
| d614163e9c |
| 99afff03b0 |
| 8f56209bd6 |
| 806c375c33 |
| 669617ab54 |
| c45c06509a |
| 9db5d0a88d |
| 973cb644d3 |
| e499509595 |
| ceff5c5cfe |
| 878c0f240e |
| c1bd50a80d |
| 14aabc1cc9 |
| 7b826ec5ad |
| 838648c0ce |
| 6ac4292912 |
| b503280256 |
| b4c91b5a6a |
| 8477009310 |
| c62eaf248f |
| 13b5f007ef |
| 7f5889559e |
| 5ee0e443e4 |
| 323b556dc8 |
| 458b9e4242 |
| fcde5908d8 |
| 083ef46c12 |
| 7a53b866f6 |
| 8a02bb7c36 |
| c64eed7d07 |
| aed130cd17 |
| 7a6c401d42 |
| b5ed0787f7 |
| c5f37eca91 |
| 73b6c1fb11 |
| 4bbc7b8f75 |
| d6d6d1b649 |
| 1bd195a513 |
| 1471aacadc |
| 62ddeb0ff0 |
| a876e46894 |
| 6df06b089e |
| cc50fdff6f |
| b1fa6b3aac |
| f6a2b7562a |
| 07cb5d1b7c |
| 449eb2d873 |
| 2bdbf51d7d |
| 9e7ac58042 |
| d45e14fd43 |
| 9a86da0e7b |
| d02e20a4c1 |
| 70e5469303 |
| 2e6ee28f9b |
| 20b0ad3ba2 |
| 7386caaecf |
| 84c46b6b68 |
| f1d9230f25 |
| f5c0efb11e |
| 4e8fbaa3d6 |
| 34c51fcea9 |
| 4ac31c89df |
| db7aa01b8d |
| 89cfe26533 |
| 588a0c5269 |
| 02e453fc8c |
| 75f26f1fc4 |
| 3c89067f52 |
| abd858d3dc |
| 163dbf7f54 |
| 642156372f |
| 7517c134c5 |
| 6e67884ff1 |
| a6b6c5a539 |
| ebfefb9161 |
| 8783dd53f6 |
| f3a760ad9c |
| 8c10331ee8 |
| 20f5a2120c |
| b56d2383c1 |
| 2bd67562b5 |
| 69a5b00e60 |
| 1d80b72ffb |
| 105fd18fee |
| f6f817926a |
| d0d3b0a298 |
| 3f932a6731 |
| aaa0e128c1 |
| 4515b5aa17 |
| 411e4d0c24 |
| 831021808c |
| 2ee0068fdc |
| 31ea6458ca |
| 20c8263e3c |
| 91bbd5366f |
| a45a27851b |
| 6a54ab24e2 |
| 58707438ba |
| 86cd5e9076 |
| 11f8030b0f |
| 3df8feb3a2 |
| 069b7775c5 |
| e3443cd22a |
| 8046ec2668 |
| 9ba4417940 |
| a5d44b60ea |
| 363604846a |
| 162b538912 |
| 104baef503 |
| 3c5636162a |
| 874fcae1e8 |
| 4dc8fe0b08 |
| 67eeabd518 |
| 622c25e3c4 |
| f216bce0e6 |
| 4d1c850512 |
| c922e73c11 |
| e172461e55 |
| 0917145622 |
| 2bda7ca642 |
| 831a2d9bd5 |
| 5db374cb50 |
| e9da80fff6 |
| 8f48e4ddec |
| 33f8a36736 |
| 6a5fb9efae |
| c1a5ff3959 |
| 8520ab1391 |
| 8a413ce71a |
| b7c864c515 |
| e2195c46d1 |
| 113836ebae |
| 00d30874da |
| 35ccc9ebb2 |
| 9f0427385f |
| b23431a657 |
| 60e2c377d3 |
| a78664f1b5 |
| 46246dcae3 |
| d135b123cd |
| 526e8bd744 |
| 5c35d1be20 |
| ce001bb142 |
| 9f69bb5c2c |
| a0c8440a5c |
| 13ef4e3c5d |
| b4099df91e |
| 082495e34e |
| 399b61ff67 |
| 3da6ef0d6d |
| f88bef15ed |
| a084e204ae |
| ecfa817d30 |
| 8d53c3ca11 |
| 810d2e6b51 |
| f44d3d6ec9 |
| 65c1249227 |
| 73dff15039 |
| ddd3ac3a4d |
| c7716817a9 |
| 5b35e13898 |
| 96e36201eb |
| ad99d3366f |
| f48f00ee6d |
| d1fac69c21 |
| 01802efc17 |
| 7f816e3237 |
| 213879484d |
| fd765bc97a |
| d1d171ee90 |
| 70ad3a924a |
| 3526d61ff2 |
| 143c31734f |
| a81c6a3a80 |
| 750978a192 |
| 6e571e26ff |
| 92b627ac1b |
| b430d41afd |
| fd0ae78eba |
| a778a89f04 |
| 365776f5d7 |
| 9f1b911625 |
| 2f494b7834 |
| 5db8642224 |
.github/workflows/test.yml (vendored): 2 changes

@@ -4,7 +4,7 @@ on:
push:
jobs:
tests:
runs-on: ubuntu-18.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
.gitignore (vendored): 1 change

@@ -38,6 +38,7 @@ t/jobs/declarative/project.json
hydra-config.h
hydra-config.h.in
result
result-*
outputs
config
stamp-h1
Makefile.am: 12 changes

@@ -1,8 +1,12 @@
SUBDIRS = src t doc
SUBDIRS = src doc
if CAN_DO_CHECK
SUBDIRS += t
endif

BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)
EXTRA_DIST = hydra-module.nix
EXTRA_DIST = nixos-modules/hydra.nix

install-data-local: hydra-module.nix
install-data-local: nixos-modules/hydra.nix
$(INSTALL) -d $(DESTDIR)$(datadir)/nix
$(INSTALL_DATA) hydra-module.nix $(DESTDIR)$(datadir)/nix/
$(INSTALL_DATA) nixos-modules/hydra.nix $(DESTDIR)$(datadir)/nix/hydra-module.nix
@@ -80,7 +80,7 @@ $ nix-build
You can use the provided shell.nix to get a working development environment:
```
$ nix-shell
$ ./bootstrap
$ autoreconfPhase
$ configurePhase # NOTE: not ./configure
$ make
```
@@ -140,7 +140,7 @@ You can also interface with Hydra through a JSON API. The API is defined in [hyd
## Additional Resources

- [Hydra User's Guide](https://nixos.org/hydra/manual/)
- [Hydra on the NixOS Wiki](https://nixos.wiki/wiki/Hydra)
- [Hydra on the NixOS Wiki](https://wiki.nixos.org/wiki/Hydra)
- [hydra-cli](https://github.com/nlewo/hydra-cli)
- [Peter Simons - Hydra: Setting up your own build farm (NixOS)](https://www.youtube.com/watch?v=RXV0Y5Bn-QQ)
configure.ac: 22 changes

@@ -10,8 +10,6 @@ AC_PROG_LN_S
AC_PROG_LIBTOOL
AC_PROG_CXX

CXXFLAGS+=" -std=c++17"

AC_PATH_PROG([XSLTPROC], [xsltproc])

AC_ARG_WITH([docbook-xsl],
@@ -55,9 +53,6 @@ PKG_CHECK_MODULES([NIX], [nix-main nix-expr nix-store])
testPath="$(dirname $(type -p expr))"
AC_SUBST(testPath)

jobsPath="$(realpath ./t/jobs)"
AC_SUBST(jobsPath)

CXXFLAGS+=" -include nix/config.h"

AC_CONFIG_FILES([
@@ -73,11 +68,22 @@ AC_CONFIG_FILES([
src/lib/Makefile
src/root/Makefile
src/script/Makefile
t/Makefile
t/jobs/config.nix
t/jobs/declarative/project.json
])

# Tests might be filtered out
AM_CONDITIONAL([CAN_DO_CHECK], [test -f "$srcdir/t/api-test.t"])
AM_COND_IF(
[CAN_DO_CHECK],
[
jobsPath="$(realpath ./t/jobs)"
AC_SUBST(jobsPath)
AC_CONFIG_FILES([
t/Makefile
t/jobs/config.nix
t/jobs/declarative/project.json
])
])

AC_CONFIG_COMMANDS([executable-scripts], [])

AC_CONFIG_HEADER([hydra-config.h])
@@ -74,6 +74,30 @@ following:
}
}

Populating a Cache
------------------

A common use for Hydra is to pre-build and cache derivations which
take a long time to build. While it is possible to directly access the
Hydra server's store over SSH, a more scalable option is to upload
built derivations to a remote store like an [S3-compatible object
store](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html#s3-binary-cache-store). Setting
the `store_uri` parameter will cause Hydra to sign and upload
derivations as they are built:

```
store_uri = s3://cache-bucket-name?compression=zstd&parallel-compression=true&write-nar-listing=1&ls-compression=br&log-compression=br&secret-key=/path/to/cache/private/key
```

This example uses [Zstandard](https://github.com/facebook/zstd)
compression on derivations to reduce CPU usage on the server, but
[Brotli](https://brotli.org/) compression for derivation listings and
build logs because it has better browser support.

See [`nix help stores`](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html)
for a description of the store URI format.
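For context, a minimal sketch of the client side, assuming the bucket name used above; the `nix key` subcommands shown are available in recent Nix versions, and the exact option names may vary:

```console
# Generate the signing key pair referenced by secret-key above (a sketch):
$ nix key generate-secret --key-name cache-bucket-name-1 > /path/to/cache/private/key
$ nix key convert-secret-to-public < /path/to/cache/private/key
cache-bucket-name-1:<base64 public key>
# On each client, trust the cache in nix.conf:
#   extra-substituters = s3://cache-bucket-name
#   extra-trusted-public-keys = cache-bucket-name-1:<base64 public key>
```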
Statsd Configuration
--------------------

@@ -131,8 +155,8 @@ use LDAP to manage roles and users.
This is configured by defining the `<ldap>` block in the configuration file.
In this block it's possible to configure the authentication plugin in the
`<config>` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`.
The documentation for the available settings can be found [here]
(https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
The documentation for the available settings can be found
[here](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).

Note that the bind password (if needed) should be supplied as an included file to
prevent it from leaking to the Nix store.

@@ -179,6 +203,7 @@ Example configuration:
<role_search_options>
deref = always
</role_search_options>
</store>
</config>
<role_mapping>
# Make all users in the hydra_admin group Hydra admins

@@ -18,7 +18,7 @@ $ nix-shell
To build Hydra, you should then do:

```console
[nix-shell]$ ./bootstrap
[nix-shell]$ autoreconfPhase
[nix-shell]$ configurePhase
[nix-shell]$ make
```
@@ -30,6 +30,8 @@ foreman:
$ foreman start
```

The Hydra interface will be available on port 63333, with an admin user named "alice" with password "foobar".
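As a quick smoke test of that development instance, logging in over the JSON API with those default credentials should succeed (a sketch; the port and credentials are the defaults mentioned above):

```console
$ curl --referer http://localhost:63333 \
    -H 'Accept: application/json' -H 'Content-Type: application/json' \
    -d '{"username": "alice", "password": "foobar"}' \
    http://localhost:63333/login -c hydra-cookie.txt
```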
You can run just the Hydra web server in your source tree as follows:

```console

@@ -404,3 +404,10 @@ analogous:
| `String value` | `gitea_status_repo` | *Name of the `Git checkout` input* |
| `String value` | `gitea_http_url` | *Public URL of `gitea`*, optional |

Content-addressed derivations
-----------------------------

Hydra can, to a certain extent, use the [`ca-derivations` experimental Nix feature](https://github.com/NixOS/rfcs/pull/62).
To use it, make sure that the Nix version you use is at least as recent as the one used in hydra's flake.

Be warned that this support is still highly experimental, and anything beyond the basic functionality might be broken.
@@ -1,9 +1,12 @@
# Webhooks

Hydra can be notified by github's webhook to trigger a new evaluation when a
Hydra can be notified by github or gitea with webhooks to trigger a new evaluation when a
jobset has a github repo in its input.
To set up a github webhook go to `https://github.com/<yourhandle>/<yourrepo>/settings` and in the `Webhooks` tab
click on `Add webhook`.

## GitHub

To set up a webhook for a GitHub repository go to `https://github.com/<yourhandle>/<yourrepo>/settings`
and in the `Webhooks` tab click on `Add webhook`.

- In `Payload URL` fill in `https://<your-hydra-domain>/api/push-github`.
- In `Content type` switch to `application/json`.
@@ -11,3 +14,14 @@ click on `Add webhook`.
- For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`.

Then add the hook with `Add webhook`.
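The same hook can also be created without the web UI, through GitHub's REST API; a sketch, assuming a personal access token with the `admin:repo_hook` scope:

```console
$ curl -X POST https://api.github.com/repos/<yourhandle>/<yourrepo>/hooks \
    -H 'Accept: application/vnd.github+json' \
    -H 'Authorization: Bearer <personal access token>' \
    -d '{"name": "web", "active": true, "events": ["push"],
         "config": {"url": "https://<your-hydra-domain>/api/push-github",
                    "content_type": "json"}}'
```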
## Gitea

To set up a webhook for a Gitea repository go to the settings of the repository in your Gitea instance
and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop down.

- In `Target URL` fill in `https://<your-hydra-domain>/api/push-gitea`.
- Keep HTTP method `POST`, POST Content Type `application/json` and Trigger On `Push Events`.
- Change the branch filter to match the git branch hydra builds.

Then add the hook with `Add webhook`.
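Gitea's API can likewise create the hook; a sketch, assuming an API token for a user with access to the repository:

```console
$ curl -X POST https://<your-gitea-domain>/api/v1/repos/<owner>/<repo>/hooks \
    -H 'Accept: application/json' -H 'Content-Type: application/json' \
    -H 'Authorization: token <api token>' \
    -d '{"type": "gitea", "active": true, "events": ["push"],
         "config": {"url": "https://<your-hydra-domain>/api/push-gitea",
                    "content_type": "json"}}'
```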
flake.lock (generated): 128 changes

@@ -1,54 +1,111 @@
{
"nodes": {
"lowdown-src": {
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1633514407,
"narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
"owner": "kristapsdz",
"repo": "lowdown",
"rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "kristapsdz",
"repo": "lowdown",
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1712014858,
"narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"locked": {
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"libgit2": {
"flake": false,
"locked": {
"lastModified": 1697646580,
"narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=",
"owner": "libgit2",
"repo": "libgit2",
"rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5",
"type": "github"
},
"original": {
"owner": "libgit2",
"repo": "libgit2",
"type": "github"
}
},
"nix": {
"inputs": {
"lowdown-src": "lowdown-src",
"nixpkgs": "nixpkgs",
"nixpkgs-regression": "nixpkgs-regression"
"flake-compat": "flake-compat",
"flake-parts": "flake-parts",
"libgit2": "libgit2",
"nixpkgs": [
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression",
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1661606874,
"narHash": "sha256-9+rpYzI+SmxJn+EbYxjGv68Ucp22bdFUSy/4LkHkkDQ=",
"lastModified": 1713874370,
"narHash": "sha256-gW1mO/CvsQQ5gvgiwzxsGhPFI/tx30NING+qgF5Do0s=",
"owner": "NixOS",
"repo": "nix",
"rev": "11e45768b34fdafdcf019ddbd337afa16127ff0f",
"rev": "1c8150ac312b5f9ba1b3f6768ff43b09867e5883",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "2.11.0",
"ref": "2.22-maintenance",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1657693803,
"narHash": "sha256-G++2CJ9u0E7NNTAi9n5G8TdDmGJXcIjkJ3NF8cetQB8=",
"lastModified": 1712848736,
"narHash": "sha256-CzZwhqyLlebljv1zFS2KWVH/3byHND0LfaO1jKsGuVo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "365e1b3a859281cf11b94f87231adeabbdd878a2",
"rev": "1d6a23f11e44d0fb64b3237569b87658a9eb5643",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-22.05-small",
"ref": "nixos-23.11-small",
"repo": "nixpkgs",
"type": "github"
}
@@ -69,13 +126,42 @@
"type": "github"
}
},
"root": {
"pre-commit-hooks": {
"inputs": {
"nix": "nix",
"flake-compat": [
"nix"
],
"flake-utils": "flake-utils",
"gitignore": [
"nix"
],
"nixpkgs": [
"nix",
"nixpkgs"
],
"nixpkgs-stable": [
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1712897695,
"narHash": "sha256-nMirxrGteNAl9sWiOhoN5tIHyjBbVi5e2tgZUgZlK3Y=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "40e6053ecb65fcbf12863338a6dcefb3f55f1bf8",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"root": {
"inputs": {
"nix": "nix",
"nixpkgs": "nixpkgs"
}
}
},
flake.nix: 580 changes

@@ -1,256 +1,30 @@
{
description = "A Nix-based continuous build system";

inputs.nixpkgs.follows = "nix/nixpkgs";
inputs.nix.url = "github:NixOS/nix/2.11.0";
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.11-small";
inputs.nix.url = "github:NixOS/nix/2.22-maintenance";
inputs.nix.inputs.nixpkgs.follows = "nixpkgs";

outputs = { self, nixpkgs, nix }:
let
version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (self.lastModifiedDate or "19700101")}.${self.shortRev or "DIRTY"}";

systems = [ "x86_64-linux" "aarch64-linux" ];
forEachSystem = nixpkgs.lib.genAttrs systems;

overlayList = [ self.overlays.default nix.overlays.default ];

pkgsBySystem = forEachSystem (system: import nixpkgs {
inherit system;
overlays = [ self.overlays.default nix.overlays.default ];
overlays = overlayList;
});

# NixOS configuration used for VM tests.
hydraServer =
{ config, pkgs, ... }:
{
imports = [ self.nixosModules.hydraTest ];

virtualisation.memorySize = 1024;
virtualisation.writableStore = true;

environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];

nix = {
# Without this nix tries to fetch packages from the default
# cache.nixos.org which is not reachable from this sandboxed NixOS test.
binaryCaches = [ ];
};
};

in
rec {

# A Nixpkgs overlay that provides a 'hydra' package.
overlays.default = final: prev: {

# Add LDAP dependencies that aren't currently found within nixpkgs.
perlPackages = prev.perlPackages // {

PrometheusTiny = final.perlPackages.buildPerlPackage {
pname = "Prometheus-Tiny";
version = "0.007";
src = final.fetchurl {
url = "mirror://cpan/authors/id/R/RO/ROBN/Prometheus-Tiny-0.007.tar.gz";
sha256 = "0ef8b226a2025cdde4df80129dd319aa29e884e653c17dc96f4823d985c028ec";
};
buildInputs = with final.perlPackages; [ HTTPMessage Plack TestException ];
meta = {
homepage = "https://github.com/robn/Prometheus-Tiny";
description = "A tiny Prometheus client";
license = with final.lib.licenses; [ artistic1 gpl1Plus ];
};
};

};

hydra = with final; let
perlDeps = buildEnv {
name = "hydra-perl-deps";
paths = with perlPackages; lib.closePropagation
[
AuthenSASL
CatalystActionREST
CatalystAuthenticationStoreDBIxClass
CatalystAuthenticationStoreLDAP
CatalystDevel
CatalystPluginAccessLog
CatalystPluginAuthorizationRoles
CatalystPluginCaptcha
CatalystPluginPrometheusTiny
CatalystPluginSessionStateCookie
CatalystPluginSessionStoreFastMmap
CatalystPluginStackTrace
CatalystTraitForRequestProxyBase
CatalystViewDownload
CatalystViewJSON
CatalystViewTT
CatalystXRoleApplicator
CatalystXScriptServerStarman
CryptPassphrase
CryptPassphraseArgon2
CryptRandPasswd
DataDump
DateTime
DBDPg
DBDSQLite
DigestSHA1
EmailMIME
EmailSender
FileLibMagic
FileSlurper
FileWhich
final.nix.perl-bindings
git
IOCompress
IPCRun
IPCRun3
JSON
JSONMaybeXS
JSONXS
ListSomeUtils
LWP
LWPProtocolHttps
ModulePluggable
NetAmazonS3
NetPrometheus
NetStatsd
PadWalker
ParallelForkManager
PerlCriticCommunity
PrometheusTinyShared
ReadonlyX
SetScalar
SQLSplitStatement
Starman
StringCompareConstantTime
SysHostnameLong
TermSizeAny
TermReadKey
Test2Harness
TestPostgreSQL
TextDiff
TextTable
UUID4Tiny
YAML
XMLSimple
];
};

in
stdenv.mkDerivation {

name = "hydra-${version}";

src = self;

buildInputs =
[
makeWrapper
autoconf
automake
libtool
unzip
nukeReferences
pkg-config
libpqxx
top-git
mercurial
darcs
subversion
breezy
openssl
bzip2
libxslt
final.nix
perlDeps
perl
mdbook
pixz
boost
postgresql_13
(if lib.versionAtLeast lib.version "20.03pre"
then nlohmann_json
else nlohmann_json.override { multipleHeaders = true; })
prometheus-cpp
];

checkInputs = [
cacert
foreman
glibcLocales
libressl.nc
openldap
python3
];

hydraPath = lib.makeBinPath (
[
subversion
openssh
final.nix
coreutils
findutils
pixz
gzip
bzip2
xz
gnutar
unzip
git
top-git
mercurial
darcs
gnused
breezy
] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
);

OPENLDAP_ROOT = openldap;

shellHook = ''
pushd $(git rev-parse --show-toplevel) >/dev/null

PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH
PERL5LIB=$(pwd)/src/lib:$PERL5LIB
export HYDRA_HOME="$(pwd)/src/"
mkdir -p .hydra-data
export HYDRA_DATA="$(pwd)/.hydra-data"
export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'

popd >/dev/null
'';

preConfigure = "autoreconf -vfi";

NIX_LDFLAGS = [ "-lpthread" ];

enableParallelBuilding = true;

doCheck = true;

preCheck = ''
patchShebangs .
export LOGNAME=''${LOGNAME:-foo}
# set $HOME for bzr so it can create its trace file
export HOME=$(mktemp -d)
'';

postInstall = ''
mkdir -p $out/nix-support

for i in $out/bin/*; do
read -n 4 chars < $i
if [[ $chars =~ ELF ]]; then continue; fi
wrapProgram $i \
--prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
--prefix PATH ':' $out/bin:$hydraPath \
--set HYDRA_RELEASE ${version} \
--set HYDRA_HOME $out/libexec/hydra \
--set NIX_RELEASE ${final.nix.name or "unknown"}
done
'';

dontStrip = true;

meta.description = "Build of Hydra on ${final.stdenv.system}";
passthru = { inherit perlDeps; inherit (final) nix; };
hydra = final.callPackage ./package.nix {
inherit (nixpkgs.lib) fileset;
rawSrc = self;
};
};

@@ -258,9 +32,15 @@

build = forEachSystem (system: packages.${system}.hydra);

buildNoTests = forEachSystem (system:
packages.${system}.hydra.overrideAttrs (_: {
doCheck = false;
})
);

manual = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
pkgs.runCommand "hydra-manual-${version}" { }
pkgs.runCommand "hydra-manual-${pkgs.hydra.version}" { }
''
mkdir -p $out/share
cp -prvd ${pkgs.hydra}/share/doc $out/share/
@@ -269,283 +49,9 @@
echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
'');

tests.install = forEachSystem (system:
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
simpleTest {
nodes.machine = hydraServer;
testScript =
''
machine.wait_for_job("hydra-init")
machine.wait_for_job("hydra-server")
machine.wait_for_job("hydra-evaluator")
machine.wait_for_job("hydra-queue-runner")
machine.wait_for_open_port("3000")
machine.succeed("curl --fail http://localhost:3000/")
'';
});

tests.notifications = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
simpleTest {
nodes.machine = { pkgs, ... }: {
imports = [ hydraServer ];
services.hydra-dev.extraConfig = ''
<influxdb>
url = http://127.0.0.1:8086
db = hydra
</influxdb>
'';
services.influxdb.enable = true;
};
testScript = ''
machine.wait_for_job("hydra-init")

# Create an admin account and some other state.
machine.succeed(
"""
su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
mkdir /run/jobset
chmod 755 /run/jobset
cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
chmod 644 /run/jobset/default.nix
chown -R hydra /run/jobset
"""
)

# Wait until InfluxDB can receive web requests
machine.wait_for_job("influxdb")
machine.wait_for_open_port("8086")

# Create an InfluxDB database where hydra will write to
machine.succeed(
"curl -XPOST 'http://127.0.0.1:8086/query' "
+ "--data-urlencode 'q=CREATE DATABASE hydra'"
)

# Wait until hydra-server can receive HTTP requests
machine.wait_for_job("hydra-server")
machine.wait_for_open_port("3000")

# Setup the project and jobset
machine.succeed(
"su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
)

# Wait until hydra has built the job and
# the InfluxDBNotification plugin uploaded its notification to InfluxDB
machine.wait_until_succeeds(
"curl -s -H 'Accept: application/csv' "
+ "-G 'http://127.0.0.1:8086/query?db=hydra' "
+ "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
)
'';
});

tests.gitea = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
makeTest {
nodes.machine = { pkgs, ... }: {
imports = [ hydraServer ];
services.hydra-dev.extraConfig = ''
<gitea_authorization>
root=d7f16a3412e01a43a414535b16007c6931d3a9c7
</gitea_authorization>
'';
nix = {
distributedBuilds = true;
buildMachines = [{
hostName = "localhost";
systems = [ system ];
}];
binaryCaches = [ ];
};
services.gitea = {
enable = true;
database.type = "postgres";
disableRegistration = true;
httpPort = 3001;
};
services.openssh.enable = true;
environment.systemPackages = with pkgs; [ gitea git jq gawk ];
networking.firewall.allowedTCPPorts = [ 3000 ];
};
skipLint = true;
testScript =
let
scripts.mktoken = pkgs.writeText "token.sql" ''
INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7');
'';

scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
set -x
mkdir -p /tmp/repo $HOME/.ssh
cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
chmod 0400 $HOME/.ssh/privk
git -C /tmp/repo init
cp ${smallDrv} /tmp/repo/jobset.nix
git -C /tmp/repo add .
git config --global user.email test@localhost
git config --global user.name test
git -C /tmp/repo commit -m 'Initial import'
git -C /tmp/repo remote add origin gitea@machine:root/repo
GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
git -C /tmp/repo push origin master
git -C /tmp/repo log >&2
'';

scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
set -x
su -l hydra -c "hydra-create-user root --email-address \
'alice@example.org' --password foobar --role admin"

URL=http://localhost:3000
USERNAME="root"
PASSWORD="foobar"
PROJECT_NAME="trivial"
JOBSET_NAME="trivial"
mycurl() {
curl --referer $URL -H "Accept: application/json" \
-H "Content-Type: application/json" $@
}

cat >data.json <<EOF
{ "username": "$USERNAME", "password": "$PASSWORD" }
EOF
mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt

cat >data.json <<EOF
{
"displayname":"Trivial",
"enabled":"1",
"visible":"1"
}
EOF
mycurl --silent -X PUT $URL/project/$PROJECT_NAME \
-d @data.json -b hydra-cookie.txt

cat >data.json <<EOF
{
"description": "Trivial",
"checkinterval": "60",
"enabled": "1",
"visible": "1",
"keepnr": "1",
"enableemail": true,
"emailoverride": "hydra@localhost",
"type": 0,
"nixexprinput": "git",
"nixexprpath": "jobset.nix",
"inputs": {
"git": {"value": "http://localhost:3001/root/repo.git", "type": "git"},
"gitea_repo_name": {"value": "repo", "type": "string"},
"gitea_repo_owner": {"value": "root", "type": "string"},
"gitea_status_repo": {"value": "git", "type": "string"},
"gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
}
}
EOF

mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME \
-d @data.json -b hydra-cookie.txt
'';

api_token = "d7f16a3412e01a43a414535b16007c6931d3a9c7";

snakeoilKeypair = {
privkey = pkgs.writeText "privkey.snakeoil" ''
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
-----END EC PRIVATE KEY-----
'';

pubkey = pkgs.lib.concatStrings [
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
"yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
"9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= sakeoil"
];
};

smallDrv = pkgs.writeText "jobset.nix" ''
{ trivial = builtins.derivation {
name = "trivial";
system = "${system}";
builder = "/bin/sh";
allowSubstitutes = false;
preferLocalBuild = true;
args = ["-c" "echo success > $out; exit 0"];
};
}
'';
in
''
import json

machine.start()
machine.wait_for_unit("multi-user.target")
machine.wait_for_open_port(3000)
machine.wait_for_open_port(3001)

machine.succeed(
"su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
+ "--username root --password root --email test@localhost'"
)
machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")

machine.succeed(
"curl --fail -X POST http://localhost:3001/api/v1/user/repos "
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
+ ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
)

machine.succeed(
"curl --fail -X POST http://localhost:3001/api/v1/user/keys "
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
+ ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
)

machine.succeed(
"${scripts.git-setup}"
)

machine.succeed(
"${scripts.hydra-setup}"
)

machine.wait_until_succeeds(
'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
+ '| jq .buildstatus | xargs test 0 -eq'
)

data = machine.succeed(
'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
)

response = json.loads(data)

assert len(response) == 2, "Expected exactly two status updates for latest commit!"
assert response[0]['status'] == "success", "Expected latest status to be success!"
assert response[1]['status'] == "pending", "Expected first status to be pending!"

machine.shutdown()
'';
});

tests.validate-openapi = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
pkgs.runCommand "validate-openapi"
{ buildInputs = [ pkgs.openapi-generator-cli ]; }
''
openapi-generator-cli validate -i ${./hydra-api.yaml}
touch $out
'');
tests = import ./nixos-tests.nix {
inherit forEachSystem nixpkgs pkgsBySystem nixosModules;
};

container = nixosConfigurations.container.config.system.build.toplevel;
};
@@ -561,56 +67,16 @@
default = pkgsBySystem.${system}.hydra;
});

nixosModules.hydra = {
imports = [ ./hydra-module.nix ];
nixpkgs.overlays = [ self.overlays.default nix.overlays.default ];
};

nixosModules.hydraTest = { pkgs, ... }: {
imports = [ self.nixosModules.hydra ];

services.hydra-dev.enable = true;
services.hydra-dev.hydraURL = "http://hydra.example.org";
services.hydra-dev.notificationSender = "admin@hydra.example.org";

systemd.services.hydra-send-stats.enable = false;

services.postgresql.enable = true;
services.postgresql.package = pkgs.postgresql_11;

# The following is to work around the following error from hydra-server:
# [error] Caught exception in engine "Cannot determine local time zone"
time.timeZone = "UTC";

nix.extraOptions = ''
allowed-uris = https://github.com/
'';
};

nixosModules.hydraProxy = {
services.httpd = {
enable = true;
adminAddr = "hydra-admin@example.org";
extraConfig = ''
<Proxy *>
Order deny,allow
Allow from all
</Proxy>

ProxyRequests Off
ProxyPreserveHost On
ProxyPass /apache-errors !
ErrorDocument 503 /apache-errors/503.html
ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
ProxyPassReverse / http://127.0.0.1:3000/
'';
};
nixosModules = import ./nixos-modules {
overlays = overlayList;
};

nixosConfigurations.container = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules =
[
self.nixosModules.hydra
self.nixosModules.overlayNixpkgsForThisHydra
self.nixosModules.hydraTest
self.nixosModules.hydraProxy
{
@@ -533,13 +533,13 @@ paths:
schema:
$ref: '#/components/schemas/Error'

/eval/{build-id}:
/eval/{eval-id}:
get:
summary: Retrieves evaluations identified by build id
summary: Retrieves evaluations identified by eval id
parameters:
- name: build-id
- name: eval-id
in: path
description: build identifier
description: eval identifier
required: true
schema:
type: integer
@@ -551,6 +551,24 @@ paths:
schema:
$ref: '#/components/schemas/JobsetEval'

/eval/{eval-id}/builds:
get:
summary: Retrieves all builds belonging to an evaluation identified by eval id
parameters:
- name: eval-id
in: path
description: eval identifier
required: true
schema:
type: integer
responses:
'200':
description: builds
content:
application/json:
schema:
$ref: '#/components/schemas/JobsetEvalBuilds'
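For illustration, fetching the builds of an evaluation through this new endpoint would look like the following (a sketch; the eval id `123` is hypothetical):

```console
$ curl -H 'Accept: application/json' https://<your-hydra-domain>/eval/123/builds
```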
components:
schemas:

@@ -796,6 +814,13 @@ components:
additionalProperties:
$ref: '#/components/schemas/JobsetEvalInput'

JobsetEvalBuilds:
type: array
items:
type: object
additionalProperties:
$ref: '#/components/schemas/Build'

JobsetOverview:
type: array
items:
@@ -870,7 +895,7 @@ components:
description: Size of the produced file
type: integer
defaultpath:
description: This is a Git/Mercurial commit hash or a Subversion revision number
description: if path is a directory, the default file relative to path to be served
type: string
'type':
description: Types of build product (user defined)
nixos-modules/default.nix (new file): 49 lines

@@ -0,0 +1,49 @@
{ overlays }:

{
hydra = import ./hydra.nix;

overlayNixpkgsForThisHydra = { pkgs, ... }: {
nixpkgs = { inherit overlays; };
services.hydra.package = pkgs.hydra;
};

hydraTest = { pkgs, ... }: {
services.hydra-dev.enable = true;
services.hydra-dev.hydraURL = "http://hydra.example.org";
services.hydra-dev.notificationSender = "admin@hydra.example.org";

systemd.services.hydra-send-stats.enable = false;

services.postgresql.enable = true;
services.postgresql.package = pkgs.postgresql_12;

# The following is to work around the following error from hydra-server:
# [error] Caught exception in engine "Cannot determine local time zone"
time.timeZone = "UTC";

nix.extraOptions = ''
allowed-uris = https://github.com/
'';
};

hydraProxy = {
services.httpd = {
enable = true;
adminAddr = "hydra-admin@example.org";
extraConfig = ''
<Proxy *>
Order deny,allow
Allow from all
</Proxy>

ProxyRequests Off
ProxyPreserveHost On
ProxyPass /apache-errors !
ErrorDocument 503 /apache-errors/503.html
ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
ProxyPassReverse / http://127.0.0.1:3000/
'';
};
};
}
@@ -68,7 +68,7 @@ in

package = mkOption {
type = types.path;
default = pkgs.hydra;
default = pkgs.hydra_unstable;
defaultText = literalExpression "pkgs.hydra";
description = "The Hydra package.";
};
@@ -233,7 +233,7 @@ in
gc-keep-outputs = true;
gc-keep-derivations = true;
};

services.hydra-dev.extraConfig =
''
using_frontend_proxy = 1
@@ -340,7 +340,7 @@ in
systemd.services.hydra-queue-runner =
{ wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" "network.target" ];
after = [ "hydra-init.service" "network.target" "network-online.target" ];
path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
restartTriggers = [ hydraConf ];
environment = env // {
nixos-tests.nix (new file): 309 lines

@@ -0,0 +1,309 @@
{ forEachSystem, nixpkgs, pkgsBySystem, nixosModules }:

let
# NixOS configuration used for VM tests.
hydraServer =
{ config, pkgs, ... }:
{
imports = [
nixosModules.hydra
nixosModules.overlayNixpkgsForThisHydra
nixosModules.hydraTest
];

virtualisation.memorySize = 1024;
virtualisation.writableStore = true;

environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];

nix = {
# Without this nix tries to fetch packages from the default
# cache.nixos.org which is not reachable from this sandboxed NixOS test.
settings.substituters = [ ];
};
};

in

{

install = forEachSystem (system:
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
simpleTest {
name = "hydra-install";
nodes.machine = hydraServer;
testScript =
''
machine.wait_for_job("hydra-init")
machine.wait_for_job("hydra-server")
machine.wait_for_job("hydra-evaluator")
machine.wait_for_job("hydra-queue-runner")
machine.wait_for_open_port(3000)
machine.succeed("curl --fail http://localhost:3000/")
'';
});

notifications = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
simpleTest {
name = "hydra-notifications";
nodes.machine = { pkgs, ... }: {
imports = [ hydraServer ];
services.hydra-dev.extraConfig = ''
<influxdb>
url = http://127.0.0.1:8086
db = hydra
</influxdb>
'';
services.influxdb.enable = true;
};
testScript = ''
machine.wait_for_job("hydra-init")

# Create an admin account and some other state.
machine.succeed(
"""
su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
mkdir /run/jobset
chmod 755 /run/jobset
cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
chmod 644 /run/jobset/default.nix
chown -R hydra /run/jobset
"""
)

# Wait until InfluxDB can receive web requests
machine.wait_for_job("influxdb")
machine.wait_for_open_port(8086)

# Create an InfluxDB database where hydra will write to
machine.succeed(
"curl -XPOST 'http://127.0.0.1:8086/query' "
+ "--data-urlencode 'q=CREATE DATABASE hydra'"
)

# Wait until hydra-server can receive HTTP requests
machine.wait_for_job("hydra-server")
machine.wait_for_open_port(3000)

# Setup the project and jobset
machine.succeed(
"su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
)

# Wait until hydra has built the job and
# the InfluxDBNotification plugin uploaded its notification to InfluxDB
machine.wait_until_succeeds(
"curl -s -H 'Accept: application/csv' "
+ "-G 'http://127.0.0.1:8086/query?db=hydra' "
+ "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
)
'';
});

gitea = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
makeTest {
name = "hydra-gitea";
nodes.machine = { pkgs, ... }: {
imports = [ hydraServer ];
services.hydra-dev.extraConfig = ''
<gitea_authorization>
root=d7f16a3412e01a43a414535b16007c6931d3a9c7
</gitea_authorization>
'';
nixpkgs.config.permittedInsecurePackages = [ "gitea-1.19.4" ];
nix = {
settings.substituters = [ ];
};
services.gitea = {
enable = true;
database.type = "postgres";
settings = {
service.DISABLE_REGISTRATION = true;
server.HTTP_PORT = 3001;
};
};
services.openssh.enable = true;
environment.systemPackages = with pkgs; [ gitea git jq gawk ];
networking.firewall.allowedTCPPorts = [ 3000 ];
};
skipLint = true;
testScript =
let
scripts.mktoken = pkgs.writeText "token.sql" ''
INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight, scope) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7', 'all');
'';

scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
set -x
mkdir -p /tmp/repo $HOME/.ssh
cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
chmod 0400 $HOME/.ssh/privk
git -C /tmp/repo init
cp ${smallDrv} /tmp/repo/jobset.nix
git -C /tmp/repo add .
git config --global user.email test@localhost
git config --global user.name test
git -C /tmp/repo commit -m 'Initial import'
git -C /tmp/repo remote add origin gitea@machine:root/repo
GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
git -C /tmp/repo push origin master
git -C /tmp/repo log >&2
'';

scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
set -x
su -l hydra -c "hydra-create-user root --email-address \
'alice@example.org' --password foobar --role admin"

URL=http://localhost:3000
USERNAME="root"
PASSWORD="foobar"
PROJECT_NAME="trivial"
JOBSET_NAME="trivial"
mycurl() {
curl --referer $URL -H "Accept: application/json" \
-H "Content-Type: application/json" $@
}

cat >data.json <<EOF
{ "username": "$USERNAME", "password": "$PASSWORD" }
EOF
mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt

cat >data.json <<EOF
{
"displayname":"Trivial",
"enabled":"1",
"visible":"1"
}
EOF
mycurl --silent -X PUT $URL/project/$PROJECT_NAME \
-d @data.json -b hydra-cookie.txt

cat >data.json <<EOF
{
"description": "Trivial",
"checkinterval": "60",
"enabled": "1",
"visible": "1",
"keepnr": "1",
"enableemail": true,
"emailoverride": "hydra@localhost",
"type": 0,
"nixexprinput": "git",
"nixexprpath": "jobset.nix",
"inputs": {
"git": {"value": "http://localhost:3001/root/repo.git", "type": "git"},
"gitea_repo_name": {"value": "repo", "type": "string"},
"gitea_repo_owner": {"value": "root", "type": "string"},
"gitea_status_repo": {"value": "git", "type": "string"},
"gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
}
}
EOF

mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME \
-d @data.json -b hydra-cookie.txt
'';

api_token = "d7f16a3412e01a43a414535b16007c6931d3a9c7";

snakeoilKeypair = {
privkey = pkgs.writeText "privkey.snakeoil" ''
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
-----END EC PRIVATE KEY-----
'';

pubkey = pkgs.lib.concatStrings [
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
"yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
"9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= sakeoil"
];
};

smallDrv = pkgs.writeText "jobset.nix" ''
{ trivial = builtins.derivation {
name = "trivial";
system = "${system}";
builder = "/bin/sh";
allowSubstitutes = false;
preferLocalBuild = true;
args = ["-c" "echo success > $out; exit 0"];
};
}
'';
in
''
import json

machine.start()
machine.wait_for_unit("multi-user.target")
machine.wait_for_open_port(3000)
machine.wait_for_open_port(3001)

machine.succeed(
"su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
+ "--username root --password root --email test@localhost'"
)
machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")

machine.succeed(
"curl --fail -X POST http://localhost:3001/api/v1/user/repos "
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
+ ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
)

machine.succeed(
"curl --fail -X POST http://localhost:3001/api/v1/user/keys "
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
+ ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
)

machine.succeed(
"${scripts.git-setup}"
)

machine.succeed(
"${scripts.hydra-setup}"
)

machine.wait_until_succeeds(
'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
+ '| jq .buildstatus | xargs test 0 -eq'
)

data = machine.succeed(
'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
)

response = json.loads(data)

assert len(response) == 2, "Expected exactly two status updates for latest commit (queued, finished)!"
assert response[0]['status'] == "success", "Expected finished status to be success!"
assert response[1]['status'] == "pending", "Expected queued status to be pending!"

machine.shutdown()
'';
});

validate-openapi = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
pkgs.runCommand "validate-openapi"
{ buildInputs = [ pkgs.openapi-generator-cli ]; }
''
openapi-generator-cli validate -i ${./hydra-api.yaml}
touch $out
'');

}
package.nix (new file): 272 lines

@@ -0,0 +1,272 @@
{ stdenv
, lib
, fileset

, rawSrc

, buildEnv

, perlPackages

, nix
, git

, makeWrapper
, autoreconfHook
, nukeReferences
, pkg-config
, mdbook

, unzip
, libpqxx
, top-git
, mercurial
, darcs
, subversion
, breezy
, openssl
, bzip2
, libxslt
, perl
, pixz
, boost
, postgresql_13
, nlohmann_json
, prometheus-cpp

, cacert
, foreman
, glibcLocales
, libressl
, openldap
, python3

, openssh
, coreutils
, findutils
, gzip
, xz
, gnutar
, gnused

, rpm
, dpkg
, cdrkit
}:

let
perlDeps = buildEnv {
name = "hydra-perl-deps";
paths = lib.closePropagation
([
nix.perl-bindings
git
] ++ (with perlPackages; [
AuthenSASL
CatalystActionREST
CatalystAuthenticationStoreDBIxClass
CatalystAuthenticationStoreLDAP
CatalystDevel
CatalystPluginAccessLog
CatalystPluginAuthorizationRoles
CatalystPluginCaptcha
CatalystPluginPrometheusTiny
CatalystPluginSessionStateCookie
CatalystPluginSessionStoreFastMmap
CatalystPluginStackTrace
CatalystTraitForRequestProxyBase
CatalystViewDownload
CatalystViewJSON
CatalystViewTT
CatalystXRoleApplicator
CatalystXScriptServerStarman
CryptPassphrase
CryptPassphraseArgon2
CryptRandPasswd
DataDump
DateTime
DBDPg
DBDSQLite
DigestSHA1
EmailMIME
EmailSender
FileLibMagic
FileSlurper
FileWhich
IOCompress
IPCRun
IPCRun3
JSON
JSONMaybeXS
JSONXS
ListSomeUtils
LWP
LWPProtocolHttps
ModulePluggable
NetAmazonS3
NetPrometheus
NetStatsd
PadWalker
ParallelForkManager
PerlCriticCommunity
PrometheusTinyShared
ReadonlyX
SetScalar
SQLSplitStatement
Starman
StringCompareConstantTime
SysHostnameLong
TermSizeAny
TermReadKey
Test2Harness
TestPostgreSQL
TextDiff
TextTable
UUID4Tiny
YAML
XMLSimple
]));
};

version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (rawSrc.lastModifiedDate or "19700101")}.${rawSrc.shortRev or "DIRTY"}";
in
stdenv.mkDerivation (finalAttrs: {
pname = "hydra";
inherit version;

src = fileset.toSource {
root = ./.;
fileset = fileset.unions ([
./version.txt
./configure.ac
./Makefile.am
./src
./doc
./nixos-modules/hydra.nix
# These are always needed to appease Automake
./t/Makefile.am
./t/jobs/config.nix.in
./t/jobs/declarative/project.json.in
] ++ lib.optionals finalAttrs.doCheck [
./t
./.perlcriticrc
./.yath.rc
]);
};

strictDeps = true;

nativeBuildInputs = [
makeWrapper
autoreconfHook
nukeReferences
pkg-config
mdbook
nix
perlDeps
perl
unzip
];

buildInputs = [
libpqxx
openssl
libxslt
nix
perlDeps
perl
boost
nlohmann_json
prometheus-cpp
];

nativeCheckInputs = [
bzip2
darcs
foreman
top-git
mercurial
subversion
breezy
openldap
postgresql_13
pixz
];

checkInputs = [
cacert
glibcLocales
libressl.nc
python3
];

hydraPath = lib.makeBinPath (
[
subversion
openssh
nix
coreutils
findutils
pixz
gzip
bzip2
xz
gnutar
unzip
git
top-git
mercurial
darcs
gnused
breezy
] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
);

OPENLDAP_ROOT = openldap;

shellHook = ''
pushd $(git rev-parse --show-toplevel) >/dev/null

PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH
PERL5LIB=$(pwd)/src/lib:$PERL5LIB
export HYDRA_HOME="$(pwd)/src/"
mkdir -p .hydra-data
export HYDRA_DATA="$(pwd)/.hydra-data"
export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'

popd >/dev/null
'';

NIX_LDFLAGS = [ "-lpthread" ];

enableParallelBuilding = true;

doCheck = true;

preCheck = ''
patchShebangs .
export LOGNAME=''${LOGNAME:-foo}
# set $HOME for bzr so it can create its trace file
export HOME=$(mktemp -d)
'';

postInstall = ''
mkdir -p $out/nix-support

for i in $out/bin/*; do
read -n 4 chars < $i
if [[ $chars =~ ELF ]]; then continue; fi
wrapProgram $i \
--prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
--prefix PATH ':' $out/bin:$hydraPath \
--set HYDRA_RELEASE ${version} \
--set HYDRA_HOME $out/libexec/hydra \
--set NIX_RELEASE ${nix.name or "unknown"}
done
'';

dontStrip = true;

meta.description = "Build of Hydra on ${stdenv.system}";
passthru = { inherit perlDeps nix; };
})
@@ -7,6 +7,9 @@
 #include "store-api.hh"
 #include "eval.hh"
 #include "eval-inline.hh"
+#include "eval-settings.hh"
+#include "signals.hh"
+#include "terminal.hh"
 #include "util.hh"
 #include "get-drvs.hh"
 #include "globals.hh"
@@ -25,7 +28,8 @@
 
 #include <nlohmann/json.hpp>
 
-void check_pid_status_nonblocking(pid_t check_pid) {
+void check_pid_status_nonblocking(pid_t check_pid)
+{
    // Only check 'initialized' and known PID's
    if (check_pid <= 0) { return; }
 
@@ -52,7 +56,7 @@ using namespace nix;
 static Path gcRootsDir;
 static size_t maxMemorySize;
 
-struct MyArgs : MixEvalArgs, MixCommonArgs
+struct MyArgs : MixEvalArgs, MixCommonArgs, RootArgs
 {
     Path releaseExpr;
     bool flake = false;
@@ -85,7 +89,7 @@ struct MyArgs : MixEvalArgs, MixCommonArgs
 
 static MyArgs myArgs;
 
-static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std::string & name, const std::string & subAttribute)
+static std::string queryMetaStrings(EvalState & state, PackageInfo & drv, const std::string & name, const std::string & subAttribute)
 {
     Strings res;
     std::function<void(Value & v)> rec;
@@ -93,14 +97,14 @@ static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std:
     rec = [&](Value & v) {
         state.forceValue(v, noPos);
         if (v.type() == nString)
-            res.push_back(v.string.s);
+            res.emplace_back(v.string_view());
         else if (v.isList())
            for (unsigned int n = 0; n < v.listSize(); ++n)
                rec(*v.listElems()[n]);
        else if (v.type() == nAttrs) {
-            auto a = v.attrs->find(state.symbols.create(subAttribute));
-            if (a != v.attrs->end())
-                res.push_back(std::string(state.forceString(*a->value)));
+            auto a = v.attrs()->find(state.symbols.create(subAttribute));
+            if (a != v.attrs()->end())
+                res.push_back(std::string(state.forceString(*a->value, a->pos, "while evaluating meta attributes")));
         }
     };
 
@@ -129,17 +133,17 @@ static void worker(
             LockFlags {
                 .updateLockFile = false,
                 .useRegistries = false,
-                .allowMutable = false,
+                .allowUnlocked = false,
             });
 
         callFlake(state, lockedFlake, *vFlake);
 
-        auto vOutputs = vFlake->attrs->get(state.symbols.create("outputs"))->value;
+        auto vOutputs = vFlake->attrs()->get(state.symbols.create("outputs"))->value;
         state.forceValue(*vOutputs, noPos);
 
-        auto aHydraJobs = vOutputs->attrs->get(state.symbols.create("hydraJobs"));
+        auto aHydraJobs = vOutputs->attrs()->get(state.symbols.create("hydraJobs"));
         if (!aHydraJobs)
-            aHydraJobs = vOutputs->attrs->get(state.symbols.create("checks"));
+            aHydraJobs = vOutputs->attrs()->get(state.symbols.create("checks"));
         if (!aHydraJobs)
             throw Error("flake '%s' does not provide any Hydra jobs or checks", flakeRef);
 
@@ -174,10 +178,14 @@ static void worker(
 
         if (auto drv = getDerivation(state, *v, false)) {
 
-            DrvInfo::Outputs outputs = drv->queryOutputs();
+            // CA derivations do not have static output paths, so we
+            // have to defensively not query output paths in case we
+            // encounter one.
+            PackageInfo::Outputs outputs = drv->queryOutputs(
+                !experimentalFeatureSettings.isEnabled(Xp::CaDerivations));
 
             if (drv->querySystem() == "unknown")
-                throw EvalError("derivation must have a 'system' attribute");
+                state.error<EvalError>("derivation must have a 'system' attribute").debugThrow();
 
             auto drvPath = state.store->printStorePath(drv->requireDrvPath());
 
@@ -196,27 +204,31 @@ static void worker(
             job["isChannel"] = drv->queryMetaBool("isHydraChannel", false);
 
             /* If this is an aggregate, then get its constituents. */
-            auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
-            if (a && state.forceBool(*a->value, a->pos)) {
-                auto a = v->attrs->get(state.symbols.create("constituents"));
+            auto a = v->attrs()->get(state.symbols.create("_hydraAggregate"));
+            if (a && state.forceBool(*a->value, a->pos, "while evaluating the `_hydraAggregate` attribute")) {
+                auto a = v->attrs()->get(state.symbols.create("constituents"));
                 if (!a)
-                    throw EvalError("derivation must have a ‘constituents’ attribute");
+                    state.error<EvalError>("derivation must have a ‘constituents’ attribute").debugThrow();
 
-                PathSet context;
-                state.coerceToString(a->pos, *a->value, context, true, false);
-                for (auto & i : context)
-                    if (i.at(0) == '!') {
-                        size_t index = i.find("!", 1);
-                        job["constituents"].push_back(std::string(i, index + 1));
-                    }
+                NixStringContext context;
+                state.coerceToString(a->pos, *a->value, context, "while evaluating the `constituents` attribute", true, false);
+                for (auto & c : context)
+                    std::visit(overloaded {
+                        [&](const NixStringContextElem::Built & b) {
+                            job["constituents"].push_back(b.drvPath->to_string(*state.store));
+                        },
+                        [&](const NixStringContextElem::Opaque & o) {
+                        },
+                        [&](const NixStringContextElem::DrvDeep & d) {
+                        },
+                    }, c.raw);
 
-                state.forceList(*a->value, a->pos);
+                state.forceList(*a->value, a->pos, "while evaluating the `constituents` attribute");
                 for (unsigned int n = 0; n < a->value->listSize(); ++n) {
                     auto v = a->value->listElems()[n];
                     state.forceValue(*v, noPos);
                     if (v->type() == nString)
-                        job["namedConstituents"].push_back(state.forceStringNoCtx(*v));
+                        job["namedConstituents"].push_back(v->string_view());
                 }
             }
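An aside on the `overloaded` visitor used in the new constituents handling above: it is the standard C++17 variant-visitor idiom rather than anything Hydra-specific. A minimal, self-contained sketch (simplified stand-in types, not Nix's actual NixStringContextElem):

#include <iostream>
#include <string>
#include <variant>

// Inherit each lambda's operator(), then let class template argument
// deduction build the combined visitor from the constructor call.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

int main()
{
    // Stand-in for a context element that is either "opaque" or "built".
    std::variant<int, std::string> elem = std::string("/nix/store/...-foo.drv");

    std::visit(overloaded {
        [](int opaque) { std::cout << "opaque " << opaque << "\n"; },
        [](const std::string & drvPath) { std::cout << "built " << drvPath << "\n"; },
    }, elem);
}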
@@ -231,21 +243,26 @@ static void worker(
             }
 
             nlohmann::json out;
-            for (auto & j : outputs)
-                // FIXME: handle CA/impure builds.
-                if (j.second)
-                    out[j.first] = state.store->printStorePath(*j.second);
+            for (auto & [outputName, optOutputPath] : outputs) {
+                if (optOutputPath) {
+                    out[outputName] = state.store->printStorePath(*optOutputPath);
+                } else {
+                    // See the `queryOutputs` call above; we should
+                    // not encounter missing output paths otherwise.
+                    assert(experimentalFeatureSettings.isEnabled(Xp::CaDerivations));
+                    out[outputName] = nullptr;
+                }
+            }
             job["outputs"] = std::move(out);
 
             reply["job"] = std::move(job);
         }
 
         else if (v->type() == nAttrs) {
             auto attrs = nlohmann::json::array();
             StringSet ss;
-            for (auto & i : v->attrs->lexicographicOrder(state.symbols)) {
+            for (auto & i : v->attrs()->lexicographicOrder(state.symbols)) {
                 std::string name(state.symbols[i->name]);
-                if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) {
+                if (name.find(' ') != std::string::npos) {
                     printError("skipping job with illegal name '%s'", name);
                     continue;
                 }
@@ -257,7 +274,7 @@ static void worker(
         else if (v->type() == nNull)
             ;
 
-        else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v));
+        else state.error<TypeError>("attribute '%s' is %s, which is not supported", attrPath, showType(*v)).debugThrow();
 
     } catch (EvalError & e) {
         auto msg = e.msg();
@@ -351,7 +368,7 @@ int main(int argc, char * * argv)
         ]()
         {
             try {
-                EvalState state(myArgs.searchPath, openStore());
+                EvalState state(myArgs.lookupPath, openStore());
                 Bindings & autoArgs = *myArgs.getAutoArgs(state);
                 worker(state, autoArgs, *to, *from);
             } catch (Error & e) {
@@ -416,7 +433,11 @@ int main(int argc, char * * argv)
 
             if (response.find("attrs") != response.end()) {
                 for (auto & i : response["attrs"]) {
-                    auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i;
+                    std::string path = i;
+                    if (path.find(".") != std::string::npos){
+                        path = "\"" + path + "\"";
+                    }
+                    auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) path;
                     newAttrs.insert(s);
                 }
             }
@@ -507,7 +528,7 @@ int main(int argc, char * * argv)
                 auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]);
                 auto drv2 = store->readDerivation(drvPath2);
                 job["constituents"].push_back(store->printStorePath(drvPath2));
-                drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first};
+                drv.inputDrvs.map[drvPath2].value = {drv2.outputs.begin()->first};
             }
 
             if (brokenJobs.empty()) {
@@ -2,6 +2,7 @@
 #include "hydra-config.hh"
 #include "pool.hh"
 #include "shared.hh"
+#include "signals.hh"
 
 #include <algorithm>
 #include <thread>
@@ -37,7 +38,7 @@ class JobsetId {
     friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);
 
     std::string display() const {
-        return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id);
+        return boost::str(boost::format("%1%:%2% (jobset#%3%)") % project % jobset % id);
     }
 };
 bool operator==(const JobsetId & lhs, const JobsetId & rhs)
@@ -366,6 +367,9 @@ struct Evaluator
                 printInfo("received jobset event");
             }
 
+        } catch (pqxx::broken_connection & e) {
+            printError("Database connection broken: %s", e.what());
+            std::_Exit(1);
         } catch (std::exception & e) {
             printError("exception in database monitor thread: %s", e.what());
             sleep(30);
@@ -473,6 +477,9 @@ struct Evaluator
         while (true) {
             try {
                 loop();
+            } catch (pqxx::broken_connection & e) {
+                printError("Database connection broken: %s", e.what());
+                std::_Exit(1);
             } catch (std::exception & e) {
                 printError("exception in main loop: %s", e.what());
                 sleep(30);
@@ -6,27 +6,22 @@
 #include <fcntl.h>
 
 #include "build-result.hh"
 #include "path.hh"
+#include "serve-protocol.hh"
+#include "serve-protocol-impl.hh"
 #include "state.hh"
+#include "current-process.hh"
+#include "processes.hh"
 #include "util.hh"
-#include "worker-protocol.hh"
-#include "serve-protocol.hh"
-#include "serve-protocol-impl.hh"
+#include "ssh.hh"
 #include "finally.hh"
 #include "url.hh"
 
 using namespace nix;
 
-
-struct Child
-{
-    Pid pid;
-    AutoCloseFD to, from;
-};
-
-
-static void append(Strings & dst, const Strings & src)
-{
-    dst.insert(dst.end(), src.begin(), src.end());
-}
+namespace nix::build_remote {
 
 static Strings extraStoreArgs(std::string & machine)
 {
@@ -48,64 +43,39 @@ static Strings extraStoreArgs(std::string & machine)
     return result;
 }
 
-static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Child & child)
+static std::unique_ptr<SSHMaster::Connection> openConnection(
+    ::Machine::ptr machine, SSHMaster & master)
 {
-    std::string pgmName;
-    Pipe to, from;
-    to.create();
-    from.create();
-
-    Strings argv;
+    Strings command = {"nix-store", "--serve", "--write"};
     if (machine->isLocalhost()) {
-        pgmName = "nix-store";
-        argv = {"nix-store", "--builders", "", "--serve", "--write"};
+        command.push_back("--builders");
+        command.push_back("");
     } else {
-        pgmName = "ssh";
-        auto sshName = machine->sshName;
-        Strings extraArgs = extraStoreArgs(sshName);
-        argv = {"ssh", sshName};
-        if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
-        if (machine->sshPublicHostKey != "") {
-            Path fileName = tmpDir + "/host-key";
-            auto p = machine->sshName.find("@");
-            std::string host = p != std::string::npos ? std::string(machine->sshName, p + 1) : machine->sshName;
-            writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
-            append(argv, {"-oUserKnownHostsFile=" + fileName});
-        }
-        append(argv,
-            { "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
            , "--", "nix-store", "--serve", "--write" });
-        append(argv, extraArgs);
+        command.splice(command.end(), extraStoreArgs(machine->sshName));
     }
 
-    child.pid = startProcess([&]() {
-        restoreProcessContext();
-
-        if (dup2(to.readSide.get(), STDIN_FILENO) == -1)
-            throw SysError("cannot dup input pipe to stdin");
-
-        if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1)
-            throw SysError("cannot dup output pipe to stdout");
-
-        if (dup2(stderrFD, STDERR_FILENO) == -1)
-            throw SysError("cannot dup stderr");
-
-        execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast
-
-        throw SysError("cannot start %s", pgmName);
+    auto ret = master.startCommand(std::move(command), {
+        "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
     });
 
-    to.readSide = -1;
-    from.writeSide = -1;
+    // XXX: determine the actual max value we can use from /proc.
 
-    child.to = to.writeSide.release();
-    child.from = from.readSide.release();
+    // FIXME: Should this be upstreamed into `startCommand` in Nix?
+
+    int pipesize = 1024 * 1024;
+
+    fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize);
+    fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize);
+
+    return ret;
 }
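A note on the F_SETPIPE_SZ calls above: Linux rounds the requested pipe capacity up to a page multiple and returns the capacity actually granted, failing for unprivileged callers beyond /proc/sys/fs/pipe-max-size — presumably what the XXX comment about /proc alludes to. A minimal sketch of checking the result (hypothetical helper, not part of the diff):

#include <cstdio>
#include <fcntl.h>

static void growPipe(int fd, int wanted)
{
    // F_SETPIPE_SZ takes the size by value and returns the new capacity.
    int actual = fcntl(fd, F_SETPIPE_SZ, wanted);
    if (actual < 0)
        std::perror("F_SETPIPE_SZ");
    else
        std::printf("pipe capacity is now %d bytes\n", actual);
}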
 
-static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
-    FdSource & from, FdSink & to, const StorePathSet & paths,
-    bool useSubstitutes = false)
+static void copyClosureTo(
+    ::Machine::Connection & conn,
+    Store & destStore,
+    const StorePathSet & paths,
+    SubstituteFlag useSubstitutes = NoSubstitute)
 {
     StorePathSet closure;
     destStore.computeFSClosure(paths, closure);
@@ -115,13 +85,10 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
        garbage-collect paths that are already there. Optionally, ask
        the remote host to substitute missing paths. */
     // FIXME: substitute output pollutes our build log
-    to << cmdQueryValidPaths << 1 << useSubstitutes;
-    worker_proto::write(destStore, to, closure);
-    to.flush();
-
-    /* Get back the set of paths that are already valid on the remote
-       host. */
-    auto present = worker_proto::read(destStore, from, Phantom<StorePathSet> {});
+    auto present = conn.queryValidPaths(
+        destStore, true, closure, useSubstitutes);
 
     if (present.size() == closure.size()) return;
 
@@ -133,20 +100,20 @@ static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
 
     printMsg(lvlDebug, "sending %d missing paths", missing.size());
 
-    std::unique_lock<std::timed_mutex> sendLock(sendMutex,
+    std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
         std::chrono::seconds(600));
 
-    to << cmdImportPaths;
-    destStore.exportPaths(missing, to);
-    to.flush();
+    conn.to << ServeProto::Command::ImportPaths;
+    destStore.exportPaths(missing, conn.to);
+    conn.to.flush();
 
-    if (readInt(from) != 1)
+    if (readInt(conn.from) != 1)
         throw Error("remote machine failed to import closure");
 }
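The copy strategy in copyClosureTo is: compute the full closure locally, ask the remote which of those paths it already has (optionally letting it substitute), and export only the difference. A self-contained sketch of that set arithmetic (plain strings standing in for nix's StorePath):

#include <set>
#include <string>

using PathSet = std::set<std::string>;

// Paths that still have to be exported to the remote machine.
static PathSet missingPaths(const PathSet & closure, const PathSet & presentOnRemote)
{
    PathSet missing;
    for (auto & p : closure)
        if (!presentOnRemote.count(p))
            missing.insert(p);
    return missing;
}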
 // FIXME: use Store::topoSortPaths().
-StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths)
+static StorePaths reverseTopoSortPaths(const std::map<StorePath, UnkeyedValidPathInfo> & paths)
 {
     StorePaths sorted;
     StorePathSet visited;
@@ -174,40 +141,311 @@ StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths
     return sorted;
 }
 
+static std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
+{
+    std::string base(drvPath.to_string());
+    auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
+
+    createDirs(dirOf(logFile));
+
+    AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
+    if (!logFD) throw SysError("creating log file ‘%s’", logFile);
+
+    return {std::move(logFile), std::move(logFD)};
+}
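openLogFile above shards build logs by the first two characters of the derivation basename — a basename starting with "ab" lands under <logDir>/ab/… — so no single directory accumulates every log. A sketch of the path scheme (hypothetical helper, simplified types):

#include <string>

static std::string logPathFor(const std::string & logDir, const std::string & drvBase)
{
    // "ab12...-foo.drv" -> "<logDir>/ab/12...-foo.drv"
    return logDir + "/" + drvBase.substr(0, 2) + "/" + drvBase.substr(2);
}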
+static BasicDerivation sendInputs(
+    State & state,
+    Step & step,
+    Store & localStore,
+    Store & destStore,
+    ::Machine::Connection & conn,
+    unsigned int & overhead,
+    counter & nrStepsWaiting,
+    counter & nrStepsCopyingTo
+)
+{
+    /* Replace the input derivations by their output paths to send a
+       minimal closure to the builder.
+
+       `tryResolve` currently does *not* rewrite input addresses, so it
+       is safe to do this in all cases. (It should probably have a mode
+       to do that, however, but we would not use it here.)
+    */
+    BasicDerivation basicDrv = ({
+        auto maybeBasicDrv = step.drv->tryResolve(destStore, &localStore);
+        if (!maybeBasicDrv)
+            throw Error(
+                "the derivation '%s' can’t be resolved. It’s probably "
+                "missing some outputs",
+                localStore.printStorePath(step.drvPath));
+        *maybeBasicDrv;
+    });
+
+    /* Ensure that the inputs exist in the destination store. This is
+       a no-op for regular stores, but for the binary cache store,
+       this will copy the inputs to the binary cache from the local
+       store. */
+    if (&localStore != &destStore) {
+        copyClosure(localStore, destStore,
+            step.drv->inputSrcs,
+            NoRepair, NoCheckSigs, NoSubstitute);
+    }
+
+    {
+        auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
+        mc1.reset();
+        MaintainCount<counter> mc2(nrStepsCopyingTo);
+
+        printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
+            localStore.printStorePath(step.drvPath), conn.machine->sshName);
+
+        auto now1 = std::chrono::steady_clock::now();
+
+        /* Copy the input closure. */
+        if (conn.machine->isLocalhost()) {
+            StorePathSet closure;
+            destStore.computeFSClosure(basicDrv.inputSrcs, closure);
+            copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
+        } else {
+            copyClosureTo(conn, destStore, basicDrv.inputSrcs, Substitute);
+        }
+
+        auto now2 = std::chrono::steady_clock::now();
+
+        overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+    }
+
+    return basicDrv;
+}
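The comment in sendInputs describes derivation "resolution": each inputDrvs entry (a derivation plus the output names wanted from it) is replaced by the concrete store paths of those outputs, which become plain inputSrcs, so the builder only needs a closure of store paths. A self-contained sketch of the idea (hypothetical simplified types; `lookupOutput` stands in for querying the store):

#include <functional>
#include <map>
#include <set>
#include <string>

struct MiniDrv
{
    std::set<std::string> inputSrcs;                         // concrete store paths
    std::map<std::string, std::set<std::string>> inputDrvs;  // drv path -> wanted outputs
};

static MiniDrv resolve(
    MiniDrv drv,
    const std::function<std::string(const std::string &, const std::string &)> & lookupOutput)
{
    for (auto & [drvPath, outputs] : drv.inputDrvs)
        for (auto & name : outputs)
            drv.inputSrcs.insert(lookupOutput(drvPath, name));
    drv.inputDrvs.clear();
    return drv;
}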
+static BuildResult performBuild(
+    ::Machine::Connection & conn,
+    Store & localStore,
+    StorePath drvPath,
+    const BasicDerivation & drv,
+    const ServeProto::BuildOptions & options,
+    counter & nrStepsBuilding
+)
+{
+    conn.putBuildDerivationRequest(localStore, drvPath, drv, options);
+
+    BuildResult result;
+
+    time_t startTime, stopTime;
+
+    startTime = time(0);
+    {
+        MaintainCount<counter> mc(nrStepsBuilding);
+        result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
+    }
+    stopTime = time(0);
+
+    if (!result.startTime) {
+        // If the builder gave `startTime = 0`, use our measurements
+        // instead of the builder's.
+        //
+        // Note: this represents the duration of a single round, rather
+        // than all rounds.
+        result.startTime = startTime;
+        result.stopTime = stopTime;
+    }
+
+    // If the protocol was too old to give us `builtOutputs`, initialize
+    // it manually by introspecting the derivation.
+    if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
+    {
+        // If the remote is too old to handle CA derivations, we can’t get this
+        // far anyways
+        assert(drv.type().hasKnownOutputPaths());
+        DerivationOutputsAndOptPaths drvOutputs = drv.outputsAndOptPaths(localStore);
+        // Since this a `BasicDerivation`, `staticOutputHashes` will not
+        // do any real work.
+        auto outputHashes = staticOutputHashes(localStore, drv);
+        for (auto & [outputName, output] : drvOutputs) {
+            auto outputPath = output.second;
+            // We’ve just asserted that the output paths of the derivation
+            // were known
+            assert(outputPath);
+            auto outputHash = outputHashes.at(outputName);
+            auto drvOutput = DrvOutput { outputHash, outputName };
+            result.builtOutputs.insert_or_assign(
+                std::move(outputName),
+                Realisation { drvOutput, *outputPath });
+        }
+    }
+
+    return result;
+}
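performBuild brackets the blocking read of the build result with a MaintainCount, so nrStepsBuilding counts exactly the steps currently in their build phase. The helper is an RAII counter along these lines (a sketch, assuming `counter` is an atomic integer as in Hydra's state.hh):

#include <atomic>

using counter = std::atomic<unsigned long>;

template<typename T>
struct MaintainCountSketch
{
    T & c;
    explicit MaintainCountSketch(T & c) : c(c) { ++c; }  // enter the phase
    ~MaintainCountSketch() { --c; }                      // leave on scope exit
};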
+static std::map<StorePath, UnkeyedValidPathInfo> queryPathInfos(
+    ::Machine::Connection & conn,
+    Store & localStore,
+    StorePathSet & outputs,
+    size_t & totalNarSize
+)
+{
+    /* Get info about each output path. */
+    std::map<StorePath, UnkeyedValidPathInfo> infos;
+    conn.to << ServeProto::Command::QueryPathInfos;
+    ServeProto::write(localStore, conn, outputs);
+    conn.to.flush();
+    while (true) {
+        auto storePathS = readString(conn.from);
+        if (storePathS == "") break;
+
+        auto storePath = localStore.parseStorePath(storePathS);
+        auto info = ServeProto::Serialise<UnkeyedValidPathInfo>::read(localStore, conn);
+        totalNarSize += info.narSize;
+        infos.insert_or_assign(std::move(storePath), std::move(info));
+    }
+
+    return infos;
+}
+static void copyPathFromRemote(
+    ::Machine::Connection & conn,
+    NarMemberDatas & narMembers,
+    Store & localStore,
+    Store & destStore,
+    const ValidPathInfo & info
+)
+{
+    /* Receive the NAR from the remote and add it to the
+       destination store. Meanwhile, extract all the info from the
+       NAR that getBuildOutput() needs. */
+    auto source2 = sinkToSource([&](Sink & sink)
+    {
+        /* Note: we should only send the command to dump the store
+           path to the remote if the NAR is actually going to get read
+           by the destination store, which won't happen if this path
+           is already valid on the destination store. Since this
+           lambda function only gets executed if someone tries to read
+           from source2, we will send the command from here rather
+           than outside the lambda. */
+        conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
+        conn.to.flush();
+
+        TeeSource tee(conn.from, sink);
+        extractNarData(tee, localStore.printStorePath(info.path), narMembers);
+    });
+
+    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
+}
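The TeeSource in copyPathFromRemote lets the NAR stream be stored and parsed in a single pass: every chunk pulled from the remote is also handed to a side consumer. A self-contained sketch of the tee idea (std::function stand-ins for nix's Source and Sink):

#include <cstddef>
#include <functional>
#include <string_view>

using ReadFn = std::function<size_t(char *, size_t)>;  // like nix::Source
using SinkFn = std::function<void(std::string_view)>;  // like nix::Sink

struct TeeSketch
{
    ReadFn upstream;
    SinkFn side;

    size_t read(char * buf, size_t len)
    {
        size_t n = upstream(buf, len);  // pull from the remote connection
        side({buf, n});                 // copy the same bytes to the side consumer
        return n;
    }
};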
+static void copyPathsFromRemote(
+    ::Machine::Connection & conn,
+    NarMemberDatas & narMembers,
+    Store & localStore,
+    Store & destStore,
+    const std::map<StorePath, UnkeyedValidPathInfo> & infos
+)
+{
+    auto pathsSorted = reverseTopoSortPaths(infos);
+
+    for (auto & path : pathsSorted) {
+        auto & info = infos.find(path)->second;
+        copyPathFromRemote(
+            conn, narMembers, localStore, destStore,
+            ValidPathInfo { path, info });
+    }
+
+}
+
+}
+
+/* using namespace nix::build_remote; */
+
+void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
+{
+    startTime = buildResult.startTime;
+    stopTime = buildResult.stopTime;
+    timesBuilt = buildResult.timesBuilt;
+    errorMsg = buildResult.errorMsg;
+    isNonDeterministic = buildResult.isNonDeterministic;
+
+    switch ((BuildResult::Status) buildResult.status) {
+        case BuildResult::Built:
+            stepStatus = bsSuccess;
+            break;
+        case BuildResult::Substituted:
+        case BuildResult::AlreadyValid:
+            stepStatus = bsSuccess;
+            isCached = true;
+            break;
+        case BuildResult::PermanentFailure:
+            stepStatus = bsFailed;
+            canCache = true;
+            errorMsg = "";
+            break;
+        case BuildResult::InputRejected:
+        case BuildResult::OutputRejected:
+            stepStatus = bsFailed;
+            canCache = true;
+            break;
+        case BuildResult::TransientFailure:
+            stepStatus = bsFailed;
+            canRetry = true;
+            errorMsg = "";
+            break;
+        case BuildResult::TimedOut:
+            stepStatus = bsTimedOut;
+            errorMsg = "";
+            break;
+        case BuildResult::MiscFailure:
+            stepStatus = bsAborted;
+            canRetry = true;
+            break;
+        case BuildResult::LogLimitExceeded:
+            stepStatus = bsLogLimitExceeded;
+            break;
+        case BuildResult::NotDeterministic:
+            stepStatus = bsNotDeterministic;
+            canRetry = false;
+            canCache = true;
+            break;
+        default:
+            stepStatus = bsAborted;
+            break;
+    }
+
+}
 void State::buildRemote(ref<Store> destStore,
-    Machine::ptr machine, Step::ptr step,
-    unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats,
+    ::Machine::ptr machine, Step::ptr step,
+    const ServeProto::BuildOptions & buildOptions,
     RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
     std::function<void(StepState)> updateStep,
     NarMemberDatas & narMembers)
 {
     assert(BuildResult::TimedOut == 8);
 
-    std::string base(step->drvPath.to_string());
-    result.logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
-    AutoDelete autoDelete(result.logFile, false);
-
-    createDirs(dirOf(result.logFile));
-
-    AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
-    if (!logFD) throw SysError("creating log file ‘%s’", result.logFile);
-
-    nix::Path tmpDir = createTempDir();
-    AutoDelete tmpDirDel(tmpDir, true);
+    auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath);
+    AutoDelete logFileDel(logFile, false);
+    result.logFile = logFile;
 
     try {
 
         updateStep(ssConnecting);
 
+        SSHMaster master {
+            machine->sshName,
+            machine->sshKey,
+            machine->sshPublicHostKey,
+            false, // no SSH master yet
+            false, // no compression yet
+            logFD.get(),
+        };
+
         // FIXME: rewrite to use Store.
-        Child child;
-        openConnection(machine, tmpDir, logFD.get(), child);
+        auto child = build_remote::openConnection(machine, master);
 
         {
             auto activeStepState(activeStep->state_.lock());
             if (activeStepState->cancelled) throw Error("step cancelled");
-            activeStepState->pid = child.pid;
+            activeStepState->pid = child->sshPid;
         }
 
         Finally clearPid([&]() {
@@ -222,36 +460,41 @@ void State::buildRemote(ref<Store> destStore,
            process. Meh. */
         });
 
-        FdSource from(child.from.get());
-        FdSink to(child.to.get());
+        ::Machine::Connection conn {
+            {
+                .to = child->in.get(),
+                .from = child->out.get(),
+                /* Handshake. */
+                .remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
+            },
+            /*.machine =*/ machine,
+        };
 
         Finally updateStats([&]() {
-            bytesReceived += from.read;
-            bytesSent += to.written;
+            bytesReceived += conn.from.read;
+            bytesSent += conn.to.written;
         });
 
-        /* Handshake. */
-        unsigned int remoteVersion;
+        constexpr ServeProto::Version our_version = 0x206;
 
         try {
-            to << SERVE_MAGIC_1 << 0x206;
-            to.flush();
-
-            unsigned int magic = readInt(from);
-            if (magic != SERVE_MAGIC_2)
-                throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%1%’", machine->sshName);
-            remoteVersion = readInt(from);
-            if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
-                throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%1%’", machine->sshName);
-            if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0)
-                throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName);
-
+            conn.remoteVersion = decltype(conn)::handshake(
+                conn.to,
+                conn.from,
+                our_version,
+                machine->sshName);
         } catch (EndOfFile & e) {
-            child.pid.wait();
+            child->sshPid.wait();
             std::string s = chomp(readFile(result.logFile));
             throw Error("cannot connect to ‘%1%’: %2%", machine->sshName, s);
         }
 
+        // Do not attempt to speak a newer version of the protocol.
+        //
+        // Per https://github.com/NixOS/nix/issues/9584 should be handled as
+        // part of `handshake` in upstream nix.
+        conn.remoteVersion = std::min(conn.remoteVersion, our_version);
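The negotiation rule implemented above is symmetric: each side advertises a version, and the session then speaks min(ours, theirs), so a newer queue runner never emits messages an older `nix-store --serve` cannot parse. A minimal sketch (0xMMNN version encoding, e.g. 0x206 = major 0x2, minor 6):

#include <algorithm>
#include <cstdint>

using Version = uint32_t;

constexpr Version ourVersion = 0x206;

static Version negotiate(Version remoteVersion)
{
    // Clamp to what both sides understand.
    return std::min(remoteVersion, ourVersion);
}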
         {
             auto info(machine->state->connectInfo.lock());
             info->consecutiveFailures = 0;
@@ -263,62 +506,12 @@ void State::buildRemote(ref<Store> destStore,
            copy the immediate sources of the derivation and the required
            outputs of the input derivations. */
         updateStep(ssSendingInputs);
+        BasicDerivation resolvedDrv = build_remote::sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo);
 
-        StorePathSet inputs;
-        BasicDerivation basicDrv(*step->drv);
-
-        for (auto & p : step->drv->inputSrcs)
-            inputs.insert(p);
-
-        for (auto & input : step->drv->inputDrvs) {
-            auto drv2 = localStore->readDerivation(input.first);
-            for (auto & name : input.second) {
-                if (auto i = get(drv2.outputs, name)) {
-                    auto outPath = i->path(*localStore, drv2.name, name);
-                    inputs.insert(*outPath);
-                    basicDrv.inputSrcs.insert(*outPath);
-                }
-            }
-        }
-
-        /* Ensure that the inputs exist in the destination store. This is
-           a no-op for regular stores, but for the binary cache store,
-           this will copy the inputs to the binary cache from the local
-           store. */
-        if (localStore != std::shared_ptr<Store>(destStore)) {
-            copyClosure(*localStore, *destStore,
-                step->drv->inputSrcs,
-                NoRepair, NoCheckSigs, NoSubstitute);
-        }
-
-        {
-            auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
-            mc1.reset();
-            MaintainCount<counter> mc2(nrStepsCopyingTo);
-
-            printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
-                localStore->printStorePath(step->drvPath), machine->sshName);
-
-            auto now1 = std::chrono::steady_clock::now();
-
-            /* Copy the input closure. */
-            if (machine->isLocalhost()) {
-                StorePathSet closure;
-                destStore->computeFSClosure(inputs, closure);
-                copyPaths(*destStore, *localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
-            } else {
-                copyClosureTo(machine->state->sendLock, *destStore, from, to, inputs, true);
-            }
-
-            auto now2 = std::chrono::steady_clock::now();
-
-            result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
-        }
-
-        autoDelete.cancel();
+        logFileDel.cancel();
 
         /* Truncate the log to get rid of messages about substitutions
-            etc. on the remote system. */
+           etc. on the remote system. */
         if (lseek(logFD.get(), SEEK_SET, 0) != 0)
             throw SysError("seeking to the start of log file ‘%s’", result.logFile);
 
@@ -334,85 +527,17 @@ void State::buildRemote(ref<Store> destStore,
 
         updateStep(ssBuilding);
 
-        to << cmdBuildDerivation << localStore->printStorePath(step->drvPath);
-        writeDerivation(to, *localStore, basicDrv);
-        to << maxSilentTime << buildTimeout;
-        if (GET_PROTOCOL_MINOR(remoteVersion) >= 2)
-            to << maxLogSize;
-        if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
-            to << repeats // == build-repeat
-               << step->isDeterministic; // == enforce-determinism
-        }
-        to.flush();
+        BuildResult buildResult = build_remote::performBuild(
+            conn,
+            *localStore,
+            step->drvPath,
+            resolvedDrv,
+            buildOptions,
+            nrStepsBuilding
+        );
 
-        result.startTime = time(0);
-        int res;
-        {
-            MaintainCount<counter> mc(nrStepsBuilding);
-            res = readInt(from);
-        }
-        result.stopTime = time(0);
+        result.updateWithBuildResult(buildResult);
 
-        result.errorMsg = readString(from);
-        if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
-            result.timesBuilt = readInt(from);
-            result.isNonDeterministic = readInt(from);
-            auto start = readInt(from);
-            auto stop = readInt(from);
-            if (start && start) {
-                /* Note: this represents the duration of a single
-                   round, rather than all rounds. */
-                result.startTime = start;
-                result.stopTime = stop;
-            }
-        }
-        if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) {
-            worker_proto::read(*localStore, from, Phantom<DrvOutputs> {});
-        }
-        switch ((BuildResult::Status) res) {
-            case BuildResult::Built:
-                result.stepStatus = bsSuccess;
-                break;
-            case BuildResult::Substituted:
-            case BuildResult::AlreadyValid:
-                result.stepStatus = bsSuccess;
-                result.isCached = true;
-                break;
-            case BuildResult::PermanentFailure:
-                result.stepStatus = bsFailed;
-                result.canCache = true;
-                result.errorMsg = "";
-                break;
-            case BuildResult::InputRejected:
-            case BuildResult::OutputRejected:
-                result.stepStatus = bsFailed;
-                result.canCache = true;
-                break;
-            case BuildResult::TransientFailure:
-                result.stepStatus = bsFailed;
-                result.canRetry = true;
-                result.errorMsg = "";
-                break;
-            case BuildResult::TimedOut:
-                result.stepStatus = bsTimedOut;
-                result.errorMsg = "";
-                break;
-            case BuildResult::MiscFailure:
-                result.stepStatus = bsAborted;
-                result.canRetry = true;
-                break;
-            case BuildResult::LogLimitExceeded:
-                result.stepStatus = bsLogLimitExceeded;
-                break;
-            case BuildResult::NotDeterministic:
-                result.stepStatus = bsNotDeterministic;
-                result.canRetry = false;
-                result.canCache = true;
-                break;
-            default:
-                result.stepStatus = bsAborted;
-                break;
-        }
         if (result.stepStatus != bsSuccess) return;
 
         result.errorMsg = "";
@@ -426,6 +551,10 @@ void State::buildRemote(ref<Store> destStore,
             result.logFile = "";
         }
 
+        StorePathSet outputs;
+        for (auto & [_, realisation] : buildResult.builtOutputs)
+            outputs.insert(realisation.outPath);
+
         /* Copy the output paths. */
         if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
             updateStep(ssReceivingOutputs);
@@ -434,39 +563,8 @@ void State::buildRemote(ref<Store> destStore,
 
             auto now1 = std::chrono::steady_clock::now();
 
-            StorePathSet outputs;
-            for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
-                if (i.second.second)
-                    outputs.insert(*i.second.second);
-            }
-
-            /* Get info about each output path. */
-            std::map<StorePath, ValidPathInfo> infos;
             size_t totalNarSize = 0;
-            to << cmdQueryPathInfos;
-            worker_proto::write(*localStore, to, outputs);
-            to.flush();
-            while (true) {
-                auto storePathS = readString(from);
-                if (storePathS == "") break;
-                auto deriver = readString(from); // deriver
-                auto references = worker_proto::read(*localStore, from, Phantom<StorePathSet> {});
-                readLongLong(from); // download size
-                auto narSize = readLongLong(from);
-                auto narHash = Hash::parseAny(readString(from), htSHA256);
-                auto ca = parseContentAddressOpt(readString(from));
-                readStrings<StringSet>(from); // sigs
-                ValidPathInfo info(localStore->parseStorePath(storePathS), narHash);
-                assert(outputs.count(info.path));
-                info.references = references;
-                info.narSize = narSize;
-                totalNarSize += info.narSize;
-                info.narHash = narHash;
-                info.ca = ca;
-                if (deriver != "")
-                    info.deriver = localStore->parseStorePath(deriver);
-                infos.insert_or_assign(info.path, info);
-            }
+            auto infos = build_remote::queryPathInfos(conn, *localStore, outputs, totalNarSize);
 
             if (totalNarSize > maxOutputSize) {
                 result.stepStatus = bsNarSizeLimitExceeded;
@@ -477,41 +575,30 @@ void State::buildRemote(ref<Store> destStore,
             printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)",
                 localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);
 
-            auto pathsSorted = reverseTopoSortPaths(infos);
-
-            for (auto & path : pathsSorted) {
-                auto & info = infos.find(path)->second;
-
-                /* Receive the NAR from the remote and add it to the
-                   destination store. Meanwhile, extract all the info from the
-                   NAR that getBuildOutput() needs. */
-                auto source2 = sinkToSource([&](Sink & sink)
-                {
-                    /* Note: we should only send the command to dump the store
-                       path to the remote if the NAR is actually going to get read
-                       by the destination store, which won't happen if this path
-                       is already valid on the destination store. Since this
-                       lambda function only gets executed if someone tries to read
-                       from source2, we will send the command from here rather
-                       than outside the lambda. */
-                    to << cmdDumpStorePath << localStore->printStorePath(path);
-                    to.flush();
-
-                    TeeSource tee(from, sink);
-                    extractNarData(tee, localStore->printStorePath(path), narMembers);
-                });
-
-                destStore->addToStore(info, *source2, NoRepair, NoCheckSigs);
-            }
-
+            build_remote::copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos);
             auto now2 = std::chrono::steady_clock::now();
 
             result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
         }
 
+        /* Register the outputs of the newly built drv */
+        if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
+            auto outputHashes = staticOutputHashes(*localStore, *step->drv);
+            for (auto & [outputName, realisation] : buildResult.builtOutputs) {
+                // Register the resolved drv output
+                destStore->registerDrvOutput(realisation);
+
+                // Also register the unresolved one
+                auto unresolvedRealisation = realisation;
+                unresolvedRealisation.signatures.clear();
+                unresolvedRealisation.id.drvHash = outputHashes.at(outputName);
+                destStore->registerDrvOutput(unresolvedRealisation);
+            }
+        }
+
         /* Shut down the connection. */
-        child.to = -1;
-        child.pid.wait();
+        child->in = -1;
+        child->sshPid.wait();
 
     } catch (Error & e) {
         /* Disable this machine until a certain period of time has
@@ -1,7 +1,7 @@
 #include "hydra-build-result.hh"
 #include "store-api.hh"
 #include "util.hh"
-#include "fs-accessor.hh"
+#include "source-accessor.hh"
 
 #include <regex>
 
@@ -11,18 +11,18 @@ using namespace nix;
 BuildOutput getBuildOutput(
     nix::ref<Store> store,
     NarMemberDatas & narMembers,
-    const Derivation & drv)
+    const OutputPathMap derivationOutputs)
 {
     BuildOutput res;
 
     /* Compute the closure size. */
     StorePathSet outputs;
     StorePathSet closure;
-    for (auto & i : drv.outputsAndOptPaths(*store))
-        if (i.second.second) {
-            store->computeFSClosure(*i.second.second, closure);
-            outputs.insert(*i.second.second);
-        }
+    for (auto& [outputName, outputPath] : derivationOutputs) {
+        store->computeFSClosure(outputPath, closure);
+        outputs.insert(outputPath);
+        res.outputs.insert({outputName, outputPath});
+    }
     for (auto & path : closure) {
         auto info = store->queryPathInfo(path);
         res.closureSize += info->narSize;
@@ -63,7 +63,7 @@ BuildOutput getBuildOutput(
 
         auto productsFile = narMembers.find(outputS + "/nix-support/hydra-build-products");
         if (productsFile == narMembers.end() ||
-            productsFile->second.type != FSAccessor::Type::tRegular)
+            productsFile->second.type != SourceAccessor::Type::tRegular)
             continue;
         assert(productsFile->second.contents);
 
@@ -94,7 +94,7 @@ BuildOutput getBuildOutput(
 
             product.name = product.path == store->printStorePath(output) ? "" : baseNameOf(product.path);
 
-            if (file->second.type == FSAccessor::Type::tRegular) {
+            if (file->second.type == SourceAccessor::Type::tRegular) {
                 product.isRegular = true;
                 product.fileSize = file->second.fileSize.value();
                 product.sha256hash = file->second.sha256.value();
@@ -107,17 +107,16 @@ BuildOutput getBuildOutput(
     /* If no build products were explicitly declared, then add all
        outputs as a product of type "nix-build". */
     if (!explicitProducts) {
-        for (auto & [name, output] : drv.outputs) {
+        for (auto & [name, output] : derivationOutputs) {
             BuildProduct product;
-            auto outPath = output.path(*store, drv.name, name);
-            product.path = store->printStorePath(*outPath);
+            product.path = store->printStorePath(output);
             product.type = "nix-build";
             product.subtype = name == "out" ? "" : name;
-            product.name = outPath->name();
+            product.name = output.name();
 
             auto file = narMembers.find(product.path);
             assert(file != narMembers.end());
-            if (file->second.type == FSAccessor::Type::tDirectory)
+            if (file->second.type == SourceAccessor::Type::tDirectory)
                 res.products.push_back(product);
         }
     }
@@ -126,7 +125,7 @@ BuildOutput getBuildOutput(
     for (auto & output : outputs) {
         auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-release-name");
         if (file == narMembers.end() ||
-            file->second.type != FSAccessor::Type::tRegular)
+            file->second.type != SourceAccessor::Type::tRegular)
            continue;
         res.releaseName = trim(file->second.contents.value());
        // FIXME: validate release name
@@ -136,7 +135,7 @@ BuildOutput getBuildOutput(
     for (auto & output : outputs) {
         auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-metrics");
         if (file == narMembers.end() ||
-            file->second.type != FSAccessor::Type::tRegular)
+            file->second.type != SourceAccessor::Type::tRegular)
             continue;
         for (auto & line : tokenizeString<Strings>(file->second.contents.value(), "\n")) {
             auto fields = tokenizeString<std::vector<std::string>>(line);
@@ -98,8 +98,13 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
        it). */
     BuildID buildId;
     std::optional<StorePath> buildDrvPath;
-    unsigned int maxSilentTime, buildTimeout;
-    unsigned int repeats = step->isDeterministic ? 1 : 0;
+    // Other fields set below
+    nix::ServeProto::BuildOptions buildOptions {
+        .maxLogSize = maxLogSize,
+        .nrRepeats = step->isDeterministic ? 1u : 0u,
+        .enforceDeterminism = step->isDeterministic,
+        .keepFailed = false,
+    };
 
     auto conn(dbPool.get());
 
@@ -134,18 +139,18 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
             {
                 auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName));
                 if (i != jobsetRepeats.end())
-                    repeats = std::max(repeats, i->second);
+                    buildOptions.nrRepeats = std::max(buildOptions.nrRepeats, i->second);
             }
         }
         if (!build) build = *dependents.begin();
 
         buildId = build->id;
         buildDrvPath = build->drvPath;
-        maxSilentTime = build->maxSilentTime;
-        buildTimeout = build->buildTimeout;
+        buildOptions.maxSilentTime = build->maxSilentTime;
+        buildOptions.buildTimeout = build->buildTimeout;
 
         printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)",
-            localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1));
+            localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->sshName, buildId, (dependents.size() - 1));
     }
 
     if (!buildOneDone)
@@ -206,7 +211,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
 
     try {
         /* FIXME: referring builds may have conflicting timeouts. */
-        buildRemote(destStore, machine, step, maxSilentTime, buildTimeout, repeats, result, activeStep, updateStep, narMembers);
+        buildRemote(destStore, machine, step, buildOptions, result, activeStep, updateStep, narMembers);
     } catch (Error & e) {
         if (activeStep->state_.lock()->cancelled) {
             printInfo("marking step %d of build %d as cancelled", stepNr, buildId);
@@ -221,7 +226,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
 
     if (result.stepStatus == bsSuccess) {
         updateStep(ssPostProcessing);
-        res = getBuildOutput(destStore, narMembers, *step->drv);
+        res = getBuildOutput(destStore, narMembers, destStore->queryDerivationOutputMap(step->drvPath, &*localStore));
     }
 }
 
@@ -275,9 +280,12 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
 
     assert(stepNr);
 
-    for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
-        if (i.second.second)
-            addRoot(*i.second.second);
+    for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(step->drvPath, &*localStore)) {
+        if (!optOutputPath)
+            throw Error(
+                "Missing output %s for derivation %d which was supposed to have succeeded",
+                outputName, localStore->printStorePath(step->drvPath));
+        addRoot(*optOutputPath);
     }
 
     /* Register success in the database for all Build objects that
@@ -323,7 +331,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
         pqxx::work txn(*conn);
 
         for (auto & b : direct) {
-            printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id);
+            printInfo("marking build %1% as succeeded", b->id);
             markSucceededBuild(txn, b, res, buildId != b->id || result.isCached,
                 result.startTime, result.stopTime);
         }
@@ -398,7 +406,7 @@ void State::failStep(
     Step::ptr step,
     BuildID buildId,
     const RemoteResult & result,
-    Machine::ptr machine,
+    ::Machine::ptr machine,
     bool & stepFinished)
 {
     /* Register failure in the database for all Build objects that
@@ -451,7 +459,7 @@ void State::failStep(
     /* Mark all builds that depend on this derivation as failed. */
     for (auto & build : indirect) {
         if (build->finishedInDB) continue;
-        printMsg(lvlError, format("marking build %1% as failed") % build->id);
+        printError("marking build %1% as failed", build->id);
         txn.exec_params0
             ("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
             build->id,
@@ -52,7 +52,7 @@ void State::dispatcher()
         {
             auto dispatcherWakeup_(dispatcherWakeup.lock());
             if (!*dispatcherWakeup_) {
-                printMsg(lvlDebug, format("dispatcher sleeping for %1%s") %
+                debug("dispatcher sleeping for %1%s",
                     std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count());
                 dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil);
             }
@@ -60,7 +60,7 @@ void State::dispatcher()
         }
 
     } catch (std::exception & e) {
-        printMsg(lvlError, format("dispatcher: %1%") % e.what());
+        printError("dispatcher: %s", e.what());
         sleep(1);
     }
 
@@ -80,17 +80,118 @@ system_time State::doDispatch()
             jobset.second->pruneSteps();
             auto s2 = jobset.second->shareUsed();
             if (s1 != s2)
-                printMsg(lvlDebug, format("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%")
-                    % jobset.first.first % jobset.first.second % s1 % s2);
+                debug("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%",
+                    jobset.first.first, jobset.first.second, s1, s2);
         }
     }
 
+    system_time now = std::chrono::system_clock::now();
+
     /* Start steps until we're out of steps or slots. */
     auto sleepUntil = system_time::max();
     bool keepGoing;
 
+    /* Sort the runnable steps by priority. Priority is establised
+       as follows (in order of precedence):
+
+       - The global priority of the builds that depend on the
+         step. This allows admins to bump a build to the front of
+         the queue.
+
+       - The lowest used scheduling share of the jobsets depending
+         on the step.
+
+       - The local priority of the build, as set via the build's
+         meta.schedulingPriority field. Note that this is not
+         quite correct: the local priority should only be used to
+         establish priority between builds in the same jobset, but
+         here it's used between steps in different jobsets if they
+         happen to have the same lowest used scheduling share. But
+         that's not very likely.
+
+       - The lowest ID of the builds depending on the step;
+         i.e. older builds take priority over new ones.
+
+       FIXME: O(n lg n); obviously, it would be better to keep a
+       runnable queue sorted by priority. */
+    struct StepInfo
+    {
+        Step::ptr step;
+        bool alreadyScheduled = false;
+
+        /* The lowest share used of any jobset depending on this
+           step. */
+        double lowestShareUsed = 1e9;
+
+        /* Info copied from step->state to ensure that the
+           comparator is a partial ordering (see MachineInfo). */
+        int highestGlobalPriority;
+        int highestLocalPriority;
+        BuildID lowestBuildID;
+
+        StepInfo(Step::ptr step, Step::State & step_) : step(step)
+        {
+            for (auto & jobset : step_.jobsets)
+                lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
+            highestGlobalPriority = step_.highestGlobalPriority;
+            highestLocalPriority = step_.highestLocalPriority;
+            lowestBuildID = step_.lowestBuildID;
+        }
+    };
+
+    std::vector<StepInfo> runnableSorted;
+
+    struct RunnablePerType
+    {
+        unsigned int count{0};
+        std::chrono::seconds waitTime{0};
+    };
+
+    std::unordered_map<std::string, RunnablePerType> runnablePerType;
+
+    {
+        auto runnable_(runnable.lock());
+        runnableSorted.reserve(runnable_->size());
+        for (auto i = runnable_->begin(); i != runnable_->end(); ) {
+            auto step = i->lock();
+
+            /* Remove dead steps. */
+            if (!step) {
+                i = runnable_->erase(i);
+                continue;
+            }
+
+            ++i;
+
+            auto & r = runnablePerType[step->systemType];
+            r.count++;
+
+            /* Skip previously failed steps that aren't ready
+               to be retried. */
+            auto step_(step->state.lock());
+            r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
+            if (step_->tries > 0 && step_->after > now) {
+                if (step_->after < sleepUntil)
+                    sleepUntil = step_->after;
+                continue;
+            }
+
+            runnableSorted.emplace_back(step, *step_);
+        }
+    }
+
+    sort(runnableSorted.begin(), runnableSorted.end(),
+        [](const StepInfo & a, const StepInfo & b)
+        {
+            return
+                a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
+                a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
+                a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
+                a.lowestBuildID < b.lowestBuildID;
+        });
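An aside on the comparator just above: it is a lexicographic ordering over four keys with mixed directions (higher priorities first, but lower share usage and lower, i.e. older, build IDs first). The same ordering can be expressed with std::tie by swapping a and b on the descending keys — a possible refactor sketch, not what the diff does:

#include <tuple>

struct StepPrio
{
    int highestGlobalPriority;
    double lowestShareUsed;
    int highestLocalPriority;
    unsigned long lowestBuildID;
};

static bool higherPriority(const StepPrio & a, const StepPrio & b)
{
    // b's members appear on the left for the descending keys.
    return std::tie(b.highestGlobalPriority, a.lowestShareUsed,
                    b.highestLocalPriority, a.lowestBuildID)
         < std::tie(a.highestGlobalPriority, b.lowestShareUsed,
                    a.highestLocalPriority, b.lowestBuildID);
}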
do {
|
||||
system_time now = std::chrono::system_clock::now();
|
||||
now = std::chrono::system_clock::now();
|
||||
|
||||
/* Copy the currentJobs field of each machine. This is
|
||||
necessary to ensure that the sort comparator below is
|
||||
@@ -98,7 +199,7 @@ system_time State::doDispatch()
|
||||
filter out temporarily disabled machines. */
|
||||
struct MachineInfo
|
||||
{
|
||||
Machine::ptr machine;
|
||||
::Machine::ptr machine;
|
||||
unsigned long currentJobs;
|
||||
};
|
||||
std::vector<MachineInfo> machinesSorted;
|
||||
@@ -138,104 +239,6 @@ system_time State::doDispatch()
|
||||
a.currentJobs > b.currentJobs;
|
||||
});
|
||||
|
||||
/* Sort the runnable steps by priority. Priority is establised
|
||||
as follows (in order of precedence):
|
||||
|
||||
- The global priority of the builds that depend on the
|
||||
step. This allows admins to bump a build to the front of
|
||||
the queue.
|
||||
|
||||
- The lowest used scheduling share of the jobsets depending
|
||||
on the step.
|
||||
|
||||
- The local priority of the build, as set via the build's
|
||||
meta.schedulingPriority field. Note that this is not
|
||||
quite correct: the local priority should only be used to
|
||||
establish priority between builds in the same jobset, but
|
||||
here it's used between steps in different jobsets if they
|
||||
happen to have the same lowest used scheduling share. But
|
||||
that's not very likely.
|
||||
|
||||
- The lowest ID of the builds depending on the step;
|
||||
i.e. older builds take priority over new ones.
|
||||
|
||||
FIXME: O(n lg n); obviously, it would be better to keep a
|
||||
runnable queue sorted by priority. */
|
||||
struct StepInfo
|
||||
{
|
||||
Step::ptr step;
|
||||
|
||||
/* The lowest share used of any jobset depending on this
|
||||
step. */
|
||||
double lowestShareUsed = 1e9;
|
||||
|
||||
/* Info copied from step->state to ensure that the
|
||||
comparator is a partial ordering (see MachineInfo). */
|
||||
int highestGlobalPriority;
|
||||
int highestLocalPriority;
|
||||
BuildID lowestBuildID;
|
||||
|
||||
StepInfo(Step::ptr step, Step::State & step_) : step(step)
|
||||
{
|
||||
for (auto & jobset : step_.jobsets)
|
||||
lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
|
||||
highestGlobalPriority = step_.highestGlobalPriority;
|
||||
highestLocalPriority = step_.highestLocalPriority;
|
||||
lowestBuildID = step_.lowestBuildID;
|
||||
}
|
||||
};
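        // The FIXME above suggests keeping the runnable queue permanently
        // sorted instead of re-sorting on every dispatch pass.  A minimal
        // sketch of that idea under the same ordering (hypothetical, not
        // how the queue runner works today; steps would have to be
        // re-inserted whenever a priority field changes, since keys must
        // stay immutable while inside the set):
        //
        //     #include <set>
        //
        //     struct StepInfoCompare {
        //         bool operator () (const StepInfo & a, const StepInfo & b) const {
        //             if (a.highestGlobalPriority != b.highestGlobalPriority)
        //                 return a.highestGlobalPriority > b.highestGlobalPriority;
        //             if (a.lowestShareUsed != b.lowestShareUsed)
        //                 return a.lowestShareUsed < b.lowestShareUsed;
        //             if (a.highestLocalPriority != b.highestLocalPriority)
        //                 return a.highestLocalPriority > b.highestLocalPriority;
        //             return a.lowestBuildID < b.lowestBuildID;
        //         }
        //     };
        //
        //     std::multiset<StepInfo, StepInfoCompare> runnableByPriority;
        //     // insert: O(lg n); best step: *runnableByPriority.begin()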

        std::vector<StepInfo> runnableSorted;

        struct RunnablePerType
        {
            unsigned int count{0};
            std::chrono::seconds waitTime{0};
        };

        std::unordered_map<std::string, RunnablePerType> runnablePerType;

        {
            auto runnable_(runnable.lock());
            runnableSorted.reserve(runnable_->size());
            for (auto i = runnable_->begin(); i != runnable_->end(); ) {
                auto step = i->lock();

                /* Remove dead steps. */
                if (!step) {
                    i = runnable_->erase(i);
                    continue;
                }

                ++i;

                auto & r = runnablePerType[step->systemType];
                r.count++;

                /* Skip previously failed steps that aren't ready
                   to be retried. */
                auto step_(step->state.lock());
                r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
                if (step_->tries > 0 && step_->after > now) {
                    if (step_->after < sleepUntil)
                        sleepUntil = step_->after;
                    continue;
                }

                runnableSorted.emplace_back(step, *step_);
            }
        }

        sort(runnableSorted.begin(), runnableSorted.end(),
            [](const StepInfo & a, const StepInfo & b)
            {
                return
                    a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
                    a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
                    a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
                    a.lowestBuildID < b.lowestBuildID;
            });

        /* Find a machine with a free slot and find a step to run
           on it. Once we find such a pair, we restart the outer
           loop because the machine sorting will have changed. */
@@ -245,6 +248,8 @@ system_time State::doDispatch()
            if (mi.machine->state->currentJobs >= mi.machine->maxJobs) continue;

            for (auto & stepInfo : runnableSorted) {
                if (stepInfo.alreadyScheduled) continue;

                auto & step(stepInfo.step);

                /* Can this machine do this step? */
@@ -271,6 +276,8 @@ system_time State::doDispatch()
                    r.count--;
                }

                stepInfo.alreadyScheduled = true;

                /* Make a slot reservation and start a thread to
                   do the build. */
                auto builderThread = std::thread(&State::builder, this,
@@ -428,7 +435,7 @@ void Jobset::pruneSteps()
    }


State::MachineReservation::MachineReservation(State & state, Step::ptr step, Machine::ptr machine)
State::MachineReservation::MachineReservation(State & state, Step::ptr step, ::Machine::ptr machine)
    : state(state), step(step), machine(machine)
{
    machine->state->currentJobs++;

@@ -36,10 +36,12 @@ struct BuildOutput

    std::list<BuildProduct> products;

    std::map<std::string, nix::StorePath> outputs;

    std::map<std::string, BuildMetric> metrics;
};

BuildOutput getBuildOutput(
    nix::ref<nix::Store> store,
    NarMemberDatas & narMembers,
    const nix::Derivation & drv);
    const nix::OutputPathMap derivationOutputs);

@@ -1,6 +1,7 @@
#include <iostream>
#include <thread>
#include <optional>
#include <type_traits>

#include <sys/types.h>
#include <sys/stat.h>
@@ -8,6 +9,9 @@

#include <prometheus/exposer.h>

#include <nlohmann/json.hpp>

#include "signals.hh"
#include "state.hh"
#include "hydra-build-result.hh"
#include "store-api.hh"
@@ -15,20 +19,11 @@

#include "globals.hh"
#include "hydra-config.hh"
#include "json.hh"
#include "s3-binary-cache-store.hh"
#include "shared.hh"

using namespace nix;


namespace nix {

template<> void toJSON<std::atomic<long>>(std::ostream & str, const std::atomic<long> & n) { str << n; }
template<> void toJSON<std::atomic<uint64_t>>(std::ostream & str, const std::atomic<uint64_t> & n) { str << n; }
template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }

}
using nlohmann::json;


std::string getEnvOrDie(const std::string & key)
@@ -146,33 +141,51 @@ void State::parseMachines(const std::string & contents)
        if (tokens.size() < 3) continue;
        tokens.resize(8);

        auto machine = std::make_shared<Machine>();
        machine->sshName = tokens[0];
        machine->systemTypes = tokenizeString<StringSet>(tokens[1], ",");
        machine->sshKey = tokens[2] == "-" ? std::string("") : tokens[2];
        if (tokens[3] != "")
            machine->maxJobs = string2Int<decltype(machine->maxJobs)>(tokens[3]).value();
        else
            machine->maxJobs = 1;
        machine->speedFactor = atof(tokens[4].c_str());
        if (tokens[5] == "-") tokens[5] = "";
        machine->supportedFeatures = tokenizeString<StringSet>(tokens[5], ",");
        auto supportedFeatures = tokenizeString<StringSet>(tokens[5], ",");

        if (tokens[6] == "-") tokens[6] = "";
        machine->mandatoryFeatures = tokenizeString<StringSet>(tokens[6], ",");
        for (auto & f : machine->mandatoryFeatures)
            machine->supportedFeatures.insert(f);
        if (tokens[7] != "" && tokens[7] != "-")
            machine->sshPublicHostKey = base64Decode(tokens[7]);
        auto mandatoryFeatures = tokenizeString<StringSet>(tokens[6], ",");

        for (auto & f : mandatoryFeatures)
            supportedFeatures.insert(f);

        using MaxJobs = std::remove_const<decltype(nix::Machine::maxJobs)>::type;

        auto machine = std::make_shared<::Machine>(nix::Machine {
            // `storeUri`, not yet used
            "",
            // `systemTypes`
            tokenizeString<StringSet>(tokens[1], ","),
            // `sshKey`
            tokens[2] == "-" ? "" : tokens[2],
            // `maxJobs`
            tokens[3] != ""
                ? string2Int<MaxJobs>(tokens[3]).value()
                : 1,
            // `speedFactor`
            atof(tokens[4].c_str()),
            // `supportedFeatures`
            std::move(supportedFeatures),
            // `mandatoryFeatures`
            std::move(mandatoryFeatures),
            // `sshPublicHostKey`
            tokens[7] != "" && tokens[7] != "-"
                ? base64Decode(tokens[7])
                : "",
        });
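        // The format parsed above is the classic /etc/nix/machines
        // layout: eight whitespace-separated columns with "-" as the
        // empty placeholder.  An illustrative line (hostname and key are
        // made up) and how its columns map onto the tokens:
        //
        //     root@builder1 x86_64-linux,i686-linux /var/lib/hydra/id_rsa 8 1 kvm,nixos-test benchmark BASE64HOSTKEY
        //
        //     tokens[0] = "root@builder1"            (sshName)
        //     tokens[1] = "x86_64-linux,i686-linux"  (systemTypes)
        //     tokens[2] = "/var/lib/hydra/id_rsa"    (sshKey; "-" means none)
        //     tokens[3] = "8"                        (maxJobs; empty means 1)
        //     tokens[4] = "1"                        (speedFactor)
        //     tokens[5] = "kvm,nixos-test"           (supportedFeatures)
        //     tokens[6] = "benchmark"                (mandatory, also added to supported)
        //     tokens[7] = "BASE64HOSTKEY"            (sshPublicHostKey; "-" means none)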

        machine->sshName = tokens[0];

        /* Re-use the State object of the previous machine with the
           same name. */
        auto i = oldMachines.find(machine->sshName);
        if (i == oldMachines.end())
            printMsg(lvlChatty, format("adding new machine ‘%1%’") % machine->sshName);
            printMsg(lvlChatty, "adding new machine ‘%1%’", machine->sshName);
        else
            printMsg(lvlChatty, format("updating machine ‘%1%’") % machine->sshName);
            printMsg(lvlChatty, "updating machine ‘%1%’", machine->sshName);
        machine->state = i == oldMachines.end()
            ? std::make_shared<Machine::State>()
            ? std::make_shared<::Machine::State>()
            : i->second->state;
        newMachines[machine->sshName] = machine;
    }
@@ -180,10 +193,10 @@ void State::parseMachines(const std::string & contents)
    for (auto & m : oldMachines)
        if (newMachines.find(m.first) == newMachines.end()) {
            if (m.second->enabled)
                printMsg(lvlInfo, format("removing machine ‘%1%’") % m.first);
            /* Add a disabled Machine object to make sure stats are
                printInfo("removing machine ‘%1%’", m.first);
            /* Add a disabled ::Machine object to make sure stats are
               maintained. */
            auto machine = std::make_shared<Machine>(*(m.second));
            auto machine = std::make_shared<::Machine>(*(m.second));
            machine->enabled = false;
            newMachines[m.first] = machine;
        }
@@ -211,7 +224,7 @@ void State::monitorMachinesFile()
            parseMachines("localhost " +
                (settings.thisSystem == "x86_64-linux" ? "x86_64-linux,i686-linux" : settings.thisSystem.get())
                + " - " + std::to_string(settings.maxBuildJobs) + " 1 "
                + concatStringsSep(",", settings.systemFeatures.get()));
                + concatStringsSep(",", StoreConfig::getDefaultSystemFeatures()));
            machinesReadyLock.unlock();
            return;
        }
@@ -318,10 +331,13 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID

    if (r.affected_rows() == 0) goto restart;

    for (auto & [name, output] : step->drv->outputs)
    for (auto & [name, output] : getDestStore()->queryPartialDerivationOutputMap(step->drvPath, &*localStore))
        txn.exec_params0
            ("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
             buildId, stepNr, name, localStore->printStorePath(*output.path(*localStore, step->drv->name, name)));
             buildId, stepNr, name,
             output
                 ? std::optional { localStore->printStorePath(*output)}
                 : std::nullopt);

    if (status == bsBusy)
        txn.exec(fmt("notify step_started, '%d\t%d'", buildId, stepNr));
@@ -358,11 +374,23 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
    assert(result.logFile.find('\t') == std::string::npos);
    txn.exec(fmt("notify step_finished, '%d\t%d\t%s'",
        buildId, stepNr, result.logFile));

    if (result.stepStatus == bsSuccess) {
        // Update the corresponding `BuildStepOutputs` row to add the output path
        auto res = txn.exec_params1("select drvPath from BuildSteps where build = $1 and stepnr = $2", buildId, stepNr);
        assert(res.size());
        StorePath drvPath = localStore->parseStorePath(res[0].as<std::string>());
        // If we've finished building, all the paths should be known
        for (auto & [name, output] : getDestStore()->queryDerivationOutputMap(drvPath, &*localStore))
            txn.exec_params0
                ("update BuildStepOutputs set path = $4 where build = $1 and stepnr = $2 and name = $3",
                 buildId, stepNr, name, localStore->printStorePath(output));
    }
}


int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
    Build::ptr build, const StorePath & drvPath, const std::string & outputName, const StorePath & storePath)
    Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath)
{
restart:
    auto stepNr = allocBuildStep(txn, build->id);
@@ -463,6 +491,15 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
        res.releaseName != "" ? std::make_optional(res.releaseName) : std::nullopt,
        isCachedBuild ? 1 : 0);

    for (auto & [outputName, outputPath] : res.outputs) {
        txn.exec_params0
            ("update BuildOutputs set path = $3 where build = $1 and name = $2",
             build->id,
             outputName,
             localStore->printStorePath(outputPath)
            );
    }

    txn.exec_params0("delete from BuildProducts where build = $1", build->id);

    unsigned int productNr = 1;
@@ -474,7 +511,7 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
            product.type,
            product.subtype,
            product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt,
            product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base16, false)) : std::nullopt,
            product.sha256hash ? std::make_optional(product.sha256hash->to_string(HashFormat::Base16, false)) : std::nullopt,
            product.path,
            product.name,
            product.defaultPath);
@@ -542,181 +579,168 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()

void State::dumpStatus(Connection & conn)
{
    std::ostringstream out;
    time_t now = time(0);
    json statusJson = {
        {"status", "up"},
        {"time", time(0)},
        {"uptime", now - startedAt},
        {"pid", getpid()},

        {"nrQueuedBuilds", builds.lock()->size()},
        {"nrActiveSteps", activeSteps_.lock()->size()},
        {"nrStepsBuilding", nrStepsBuilding.load()},
        {"nrStepsCopyingTo", nrStepsCopyingTo.load()},
        {"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
        {"nrStepsWaiting", nrStepsWaiting.load()},
        {"nrUnsupportedSteps", nrUnsupportedSteps.load()},
        {"bytesSent", bytesSent.load()},
        {"bytesReceived", bytesReceived.load()},
        {"nrBuildsRead", nrBuildsRead.load()},
        {"buildReadTimeMs", buildReadTimeMs.load()},
        {"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
        {"nrBuildsDone", nrBuildsDone.load()},
        {"nrStepsStarted", nrStepsStarted.load()},
        {"nrStepsDone", nrStepsDone.load()},
        {"nrRetries", nrRetries.load()},
        {"maxNrRetries", maxNrRetries.load()},
        {"nrQueueWakeups", nrQueueWakeups.load()},
        {"nrDispatcherWakeups", nrDispatcherWakeups.load()},
        {"dispatchTimeMs", dispatchTimeMs.load()},
        {"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
        {"nrDbConnections", dbPool.count()},
        {"nrActiveDbUpdates", nrActiveDbUpdates.load()},
    };
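    // With nlohmann::json the nested objects that the old JSONObject
    // streaming API emitted attribute-by-attribute can be built as plain
    // values and attached in one assignment.  A self-contained sketch of
    // the pattern used throughout this function (field values made up):
    //
    //     #include <nlohmann/json.hpp>
    //
    //     nlohmann::json j;
    //     j["machines"]["builder1"] = {
    //         {"enabled", true},
    //         {"currentJobs", 3},
    //     };
    //     j["machines"]["builder1"]["systemTypes"] = {"x86_64-linux"};
    //     std::string wire = j.dump();
    //     // -> {"machines":{"builder1":{"currentJobs":3,"enabled":true,
    //     //     "systemTypes":["x86_64-linux"]}}}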
    {
        JSONObject root(out);
        time_t now = time(0);
        root.attr("status", "up");
        root.attr("time", time(0));
        root.attr("uptime", now - startedAt);
        root.attr("pid", getpid());
        {
            auto builds_(builds.lock());
            root.attr("nrQueuedBuilds", builds_->size());
        }
        {
            auto steps_(steps.lock());
            for (auto i = steps_->begin(); i != steps_->end(); )
                if (i->second.lock()) ++i; else i = steps_->erase(i);
            root.attr("nrUnfinishedSteps", steps_->size());
            statusJson["nrUnfinishedSteps"] = steps_->size();
        }
        {
            auto runnable_(runnable.lock());
            for (auto i = runnable_->begin(); i != runnable_->end(); )
                if (i->lock()) ++i; else i = runnable_->erase(i);
            root.attr("nrRunnableSteps", runnable_->size());
            statusJson["nrRunnableSteps"] = runnable_->size();
        }
        root.attr("nrActiveSteps", activeSteps_.lock()->size());
        root.attr("nrStepsBuilding", nrStepsBuilding);
        root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
        root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
        root.attr("nrStepsWaiting", nrStepsWaiting);
        root.attr("nrUnsupportedSteps", nrUnsupportedSteps);
        root.attr("bytesSent", bytesSent);
        root.attr("bytesReceived", bytesReceived);
        root.attr("nrBuildsRead", nrBuildsRead);
        root.attr("buildReadTimeMs", buildReadTimeMs);
        root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead);
        root.attr("nrBuildsDone", nrBuildsDone);
        root.attr("nrStepsStarted", nrStepsStarted);
        root.attr("nrStepsDone", nrStepsDone);
        root.attr("nrRetries", nrRetries);
        root.attr("maxNrRetries", maxNrRetries);
        if (nrStepsDone) {
            root.attr("totalStepTime", totalStepTime);
            root.attr("totalStepBuildTime", totalStepBuildTime);
            root.attr("avgStepTime", (float) totalStepTime / nrStepsDone);
            root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone);
            statusJson["totalStepTime"] = totalStepTime.load();
            statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
            statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
            statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
        }
        root.attr("nrQueueWakeups", nrQueueWakeups);
        root.attr("nrDispatcherWakeups", nrDispatcherWakeups);
        root.attr("dispatchTimeMs", dispatchTimeMs);
        root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups);
        root.attr("nrDbConnections", dbPool.count());
        root.attr("nrActiveDbUpdates", nrActiveDbUpdates);

        {
            auto nested = root.object("machines");
            auto machines_(machines.lock());
            for (auto & i : *machines_) {
                auto & m(i.second);
                auto & s(m->state);
                auto nested2 = nested.object(m->sshName);
                nested2.attr("enabled", m->enabled);

                {
                    auto list = nested2.list("systemTypes");
                    for (auto & s : m->systemTypes)
                        list.elem(s);
                }

                {
                    auto list = nested2.list("supportedFeatures");
                    for (auto & s : m->supportedFeatures)
                        list.elem(s);
                }

                {
                    auto list = nested2.list("mandatoryFeatures");
                    for (auto & s : m->mandatoryFeatures)
                        list.elem(s);
                }

                nested2.attr("currentJobs", s->currentJobs);
                if (s->currentJobs == 0)
                    nested2.attr("idleSince", s->idleSince);
                nested2.attr("nrStepsDone", s->nrStepsDone);
                if (m->state->nrStepsDone) {
                    nested2.attr("totalStepTime", s->totalStepTime);
                    nested2.attr("totalStepBuildTime", s->totalStepBuildTime);
                    nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone);
                    nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone);
                }

                auto info(m->state->connectInfo.lock());
                nested2.attr("disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil));
                nested2.attr("lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure));
                nested2.attr("consecutiveFailures", info->consecutiveFailures);

                json machine = {
                    {"enabled", m->enabled},
                    {"systemTypes", m->systemTypes},
                    {"supportedFeatures", m->supportedFeatures},
                    {"mandatoryFeatures", m->mandatoryFeatures},
                    {"nrStepsDone", s->nrStepsDone.load()},
                    {"currentJobs", s->currentJobs.load()},
                    {"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)},
                    {"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)},
                    {"consecutiveFailures", info->consecutiveFailures},
                };

                if (s->currentJobs == 0)
                    machine["idleSince"] = s->idleSince.load();
                if (m->state->nrStepsDone) {
                    machine["totalStepTime"] = s->totalStepTime.load();
                    machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
                    machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
                    machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
                }
                statusJson["machines"][m->sshName] = machine;
            }
        }

    {
        auto nested = root.object("jobsets");
        auto jobsets_json = json::object();
        auto jobsets_(jobsets.lock());
        for (auto & jobset : *jobsets_) {
            auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second);
            nested2.attr("shareUsed", jobset.second->shareUsed());
            nested2.attr("seconds", jobset.second->getSeconds());
            jobsets_json[jobset.first.first + ":" + jobset.first.second] = {
                {"shareUsed", jobset.second->shareUsed()},
                {"seconds", jobset.second->getSeconds()},
            };
        }
        statusJson["jobsets"] = jobsets_json;
    }

    {
        auto nested = root.object("machineTypes");
        auto machineTypesJson = json::object();
        auto machineTypes_(machineTypes.lock());
        for (auto & i : *machineTypes_) {
            auto nested2 = nested.object(i.first);
            nested2.attr("runnable", i.second.runnable);
            nested2.attr("running", i.second.running);
            auto machineTypeJson = machineTypesJson[i.first] = {
                {"runnable", i.second.runnable},
                {"running", i.second.running},
            };
            if (i.second.runnable > 0)
                nested2.attr("waitTime", i.second.waitTime.count() +
                    i.second.runnable * (time(0) - lastDispatcherCheck));
                machineTypeJson["waitTime"] = i.second.waitTime.count() +
                    i.second.runnable * (time(0) - lastDispatcherCheck);
            if (i.second.running == 0)
                nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive));
                machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
        }
        statusJson["machineTypes"] = machineTypesJson;
    }

    auto store = getDestStore();

    auto nested = root.object("store");

    auto & stats = store->getStats();
    nested.attr("narInfoRead", stats.narInfoRead);
    nested.attr("narInfoReadAverted", stats.narInfoReadAverted);
    nested.attr("narInfoMissing", stats.narInfoMissing);
    nested.attr("narInfoWrite", stats.narInfoWrite);
    nested.attr("narInfoCacheSize", stats.pathInfoCacheSize);
    nested.attr("narRead", stats.narRead);
    nested.attr("narReadBytes", stats.narReadBytes);
    nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
    nested.attr("narWrite", stats.narWrite);
    nested.attr("narWriteAverted", stats.narWriteAverted);
    nested.attr("narWriteBytes", stats.narWriteBytes);
    nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
    nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
    nested.attr("narCompressionSavings",
        stats.narWriteBytes
        ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
        : 0.0);
    nested.attr("narCompressionSpeed", // MiB/s
    statusJson["store"] = {
        {"narInfoRead", stats.narInfoRead.load()},
        {"narInfoReadAverted", stats.narInfoReadAverted.load()},
        {"narInfoMissing", stats.narInfoMissing.load()},
        {"narInfoWrite", stats.narInfoWrite.load()},
        {"narInfoCacheSize", stats.pathInfoCacheSize.load()},
        {"narRead", stats.narRead.load()},
        {"narReadBytes", stats.narReadBytes.load()},
        {"narReadCompressedBytes", stats.narReadCompressedBytes.load()},
        {"narWrite", stats.narWrite.load()},
        {"narWriteAverted", stats.narWriteAverted.load()},
        {"narWriteBytes", stats.narWriteBytes.load()},
        {"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()},
        {"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
        {"narCompressionSavings",
            stats.narWriteBytes
            ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
            : 0.0},
        {"narCompressionSpeed", // MiB/s
            stats.narWriteCompressionTimeMs
            ? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
            : 0.0);
            : 0.0},
    };

    auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
    if (s3Store) {
        auto nested2 = nested.object("s3");
        auto & s3Stats = s3Store->getS3Stats();
        nested2.attr("put", s3Stats.put);
        nested2.attr("putBytes", s3Stats.putBytes);
        nested2.attr("putTimeMs", s3Stats.putTimeMs);
        nested2.attr("putSpeed",
            s3Stats.putTimeMs
            ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
            : 0.0);
        nested2.attr("get", s3Stats.get);
        nested2.attr("getBytes", s3Stats.getBytes);
        nested2.attr("getTimeMs", s3Stats.getTimeMs);
        nested2.attr("getSpeed",
            s3Stats.getTimeMs
            ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
            : 0.0);
        nested2.attr("head", s3Stats.head);
        nested2.attr("costDollarApprox",
            (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
            + s3Stats.put / 1000.0 * 0.005 +
            + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
        auto jsonS3 = statusJson["s3"] = {
            {"put", s3Stats.put.load()},
            {"putBytes", s3Stats.putBytes.load()},
            {"putTimeMs", s3Stats.putTimeMs.load()},
            {"putSpeed",
                s3Stats.putTimeMs
                ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
                : 0.0},
            {"get", s3Stats.get.load()},
            {"getBytes", s3Stats.getBytes.load()},
            {"getTimeMs", s3Stats.getTimeMs.load()},
            {"getSpeed",
                s3Stats.getTimeMs
                ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
                : 0.0},
            {"head", s3Stats.head.load()},
            {"costDollarApprox",
                (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
                + s3Stats.put / 1000.0 * 0.005 +
                + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
        };
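        // The cost approximation above encodes (roughly) the classic
        // us-east-1 S3 price card: $0.004 per 10,000 GET/HEAD requests,
        // $0.005 per 1,000 PUT requests, and $0.09 per GiB of egress.
        // A worked example with made-up counters:
        //
        //     1,000,000 GET+HEAD -> 1e6 / 10000 * 0.004 = $0.40
        //       200,000 PUT      -> 2e5 /  1000 * 0.005 = $1.00
        //        50 GiB fetched  ->          50 * 0.09  = $4.50
        //                                        total  ~ $5.90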
    }
}

@@ -725,7 +749,7 @@ void State::dumpStatus(Connection & conn)
    pqxx::work txn(conn);
    // FIXME: use PostgreSQL 9.5 upsert.
    txn.exec("delete from SystemStatus where what = 'queue-runner'");
    txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", out.str());
    txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", statusJson.dump());
    txn.exec("notify status_dumped");
    txn.commit();
}
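    // Addressing the FIXME above: on PostgreSQL >= 9.5 the delete+insert
    // pair can collapse into a single upsert.  A sketch (assumes a unique
    // constraint on SystemStatus.what, which the upsert needs):
    //
    //     txn.exec_params0(
    //         "insert into SystemStatus values ('queue-runner', $1) "
    //         "on conflict (what) do update set status = EXCLUDED.status",
    //         statusJson.dump());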
@@ -902,10 +926,17 @@ void State::run(BuildID buildOne)
    while (true) {
        try {
            auto conn(dbPool.get());
            receiver dumpStatus_(*conn, "dump_status");
            while (true) {
                conn->await_notification();
                dumpStatus(*conn);
            try {
                receiver dumpStatus_(*conn, "dump_status");
                while (true) {
                    conn->await_notification();
                    dumpStatus(*conn);
                }
            } catch (pqxx::broken_connection & connEx) {
                printMsg(lvlError, "main thread: %s", connEx.what());
                printMsg(lvlError, "main thread: Reconnecting in 10s");
                conn.markBad();
                sleep(10);
            }
        } catch (std::exception & e) {
            printMsg(lvlError, "main thread: %s", e.what());
@@ -950,7 +981,6 @@ int main(int argc, char * * argv)
        });

    settings.verboseBuild = true;
    settings.lockCPU = false;

    State state{metricsAddrOpt};
    if (status)

@@ -6,7 +6,46 @@

using namespace nix;

struct Extractor : ParseSink

struct NarMemberConstructor : CreateRegularFileSink
{
    NarMemberData & curMember;

    HashSink hashSink = HashSink { HashAlgorithm::SHA256 };

    std::optional<uint64_t> expectedSize;

    NarMemberConstructor(NarMemberData & curMember)
        : curMember(curMember)
    { }

    void isExecutable() override
    {
    }

    void preallocateContents(uint64_t size) override
    {
        expectedSize = size;
    }

    void operator () (std::string_view data) override
    {
        assert(expectedSize);
        *curMember.fileSize += data.size();
        hashSink(data);
        if (curMember.contents) {
            curMember.contents->append(data);
        }
        assert(curMember.fileSize <= expectedSize);
        if (curMember.fileSize == expectedSize) {
            auto [hash, len] = hashSink.finish();
            assert(curMember.fileSize == len);
            curMember.sha256 = hash;
        }
    }
};
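// NarMemberConstructor receives a regular file's contents in chunks:
// each chunk advances the byte count and the running hash, and the
// digest is finalized exactly when the count reaches the size announced
// via preallocateContents().  The same shape in miniature, with a
// stand-in digest instead of nix's HashSink (illustrative only):
//
//     struct ChunkDigest {
//         uint64_t seen = 0, expected = 0;
//         void expect(uint64_t n) { expected = n; }
//         void feed(std::string_view chunk) {
//             seen += chunk.size();
//             // ... update the running digest with `chunk` here ...
//             assert(seen <= expected);
//             if (seen == expected)
//                 ;  // finish() the digest and record it
//         }
//     };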

struct Extractor : FileSystemObjectSink
{
    std::unordered_set<Path> filesToKeep {
        "/nix-support/hydra-build-products",
@@ -15,7 +54,6 @@ struct Extractor : ParseSink
    };

    NarMemberDatas & members;
    NarMemberData * curMember = nullptr;
    Path prefix;

    Extractor(NarMemberDatas & members, const Path & prefix)
@@ -24,49 +62,24 @@ struct Extractor : ParseSink

    void createDirectory(const Path & path) override
    {
        members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tDirectory });
        members.insert_or_assign(prefix + path, NarMemberData { .type = SourceAccessor::Type::tDirectory });
    }

    void createRegularFile(const Path & path) override
    void createRegularFile(const Path & path, std::function<void(CreateRegularFileSink &)> func) override
    {
        curMember = &members.insert_or_assign(prefix + path, NarMemberData {
            .type = FSAccessor::Type::tRegular,
            .fileSize = 0,
            .contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
        }).first->second;
    }

    std::optional<uint64_t> expectedSize;
    std::unique_ptr<HashSink> hashSink;

    void preallocateContents(uint64_t size) override
    {
        expectedSize = size;
        hashSink = std::make_unique<HashSink>(htSHA256);
    }

    void receiveContents(std::string_view data) override
    {
        assert(expectedSize);
        assert(curMember);
        assert(hashSink);
        *curMember->fileSize += data.size();
        (*hashSink)(data);
        if (curMember->contents) {
            curMember->contents->append(data);
        }
        assert(curMember->fileSize <= expectedSize);
        if (curMember->fileSize == expectedSize) {
            auto [hash, len] = hashSink->finish();
            assert(curMember->fileSize == len);
            curMember->sha256 = hash;
            hashSink.reset();
        }
        NarMemberConstructor nmc {
            members.insert_or_assign(prefix + path, NarMemberData {
                .type = SourceAccessor::Type::tRegular,
                .fileSize = 0,
                .contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
            }).first->second,
        };
        func(nmc);
    }

    void createSymlink(const Path & path, const std::string & target) override
    {
        members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tSymlink });
        members.insert_or_assign(prefix + path, NarMemberData { .type = SourceAccessor::Type::tSymlink });
    }
};
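// The Extractor is presumably driven by nix's NAR parser, which walks
// the archive and invokes the sink callbacks in order.  A sketch of the
// call site, assuming parseDump(FileSystemObjectSink &, Source &) from
// nix's archive.hh (as used when getBuildOutput scans a NAR):
//
//     NarMemberDatas members;
//     Extractor extractor(members, "");
//     parseDump(extractor, narSource);  // narSource: any nix::Source
//     // `members` now maps each path to its type/size/sha256, plus
//     // full contents for the whitelisted nix-support files.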


@@ -1,13 +1,13 @@
#pragma once

#include "fs-accessor.hh"
#include "source-accessor.hh"
#include "types.hh"
#include "serialise.hh"
#include "hash.hh"

struct NarMemberData
{
    nix::FSAccessor::Type type;
    nix::SourceAccessor::Type type;
    std::optional<uint64_t> fileSize;
    std::optional<std::string> contents;
    std::optional<nix::Hash> sha256;

@@ -10,26 +10,30 @@ using namespace nix;
void State::queueMonitor()
{
    while (true) {
        auto conn(dbPool.get());
        try {
            queueMonitorLoop();
            queueMonitorLoop(*conn);
        } catch (pqxx::broken_connection & e) {
            printMsg(lvlError, "queue monitor: %s", e.what());
            printMsg(lvlError, "queue monitor: Reconnecting in 10s");
            conn.markBad();
            sleep(10);
        } catch (std::exception & e) {
            printMsg(lvlError, format("queue monitor: %1%") % e.what());
            printError("queue monitor: %s", e.what());
            sleep(10); // probably a DB problem, so don't retry right away
        }
    }
}
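// The connection is deliberately acquired outside the inner try block
// so the pqxx::broken_connection handler can mark that same handle bad,
// forcing the pool to hand out a fresh connection on the next
// iteration.  The pattern, condensed (useConnection is a hypothetical
// stand-in for the loop body):
//
//     while (true) {
//         auto conn(dbPool.get());
//         try {
//             useConnection(*conn);
//         } catch (pqxx::broken_connection &) {
//             conn.markBad();  // don't return the dead handle to the pool
//             sleep(10);       // back off before reconnecting
//         }
//     }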


void State::queueMonitorLoop()
void State::queueMonitorLoop(Connection & conn)
{
    auto conn(dbPool.get());

    receiver buildsAdded(*conn, "builds_added");
    receiver buildsRestarted(*conn, "builds_restarted");
    receiver buildsCancelled(*conn, "builds_cancelled");
    receiver buildsDeleted(*conn, "builds_deleted");
    receiver buildsBumped(*conn, "builds_bumped");
    receiver jobsetSharesChanged(*conn, "jobset_shares_changed");
    receiver buildsAdded(conn, "builds_added");
    receiver buildsRestarted(conn, "builds_restarted");
    receiver buildsCancelled(conn, "builds_cancelled");
    receiver buildsDeleted(conn, "builds_deleted");
    receiver buildsBumped(conn, "builds_bumped");
    receiver jobsetSharesChanged(conn, "jobset_shares_changed");

    auto destStore = getDestStore();

@@ -39,17 +43,17 @@ void State::queueMonitorLoop()
    while (!quit) {
        localStore->clearPathInfoCache();

        bool done = getQueuedBuilds(*conn, destStore, lastBuildId);
        bool done = getQueuedBuilds(conn, destStore, lastBuildId);

        if (buildOne && buildOneDone) quit = true;

        /* Sleep until we get notification from the database about an
           event. */
        if (done && !quit) {
            conn->await_notification();
            conn.await_notification();
            nrQueueWakeups++;
        } else
            conn->get_notifs();
            conn.get_notifs();

        if (auto lowestId = buildsAdded.get()) {
            lastBuildId = std::min(lastBuildId, static_cast<unsigned>(std::stoul(*lowestId) - 1));
@@ -61,11 +65,11 @@ void State::queueMonitorLoop()
        }
        if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
            printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
            processQueueChange(*conn);
            processQueueChange(conn);
        }
        if (jobsetSharesChanged.get()) {
            printMsg(lvlTalkative, "got notification: jobset shares changed");
            processJobsetSharesChange(*conn);
            processJobsetSharesChange(conn);
        }
    }

@@ -142,13 +146,13 @@ bool State::getQueuedBuilds(Connection & conn,

    createBuild = [&](Build::ptr build) {
        prom.queue_build_loads.Increment();
        printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
        printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
        nrAdded++;
        newBuildsByID.erase(build->id);

        if (!localStore->isValidPath(build->drvPath)) {
            /* Derivation has been GC'ed prematurely. */
            printMsg(lvlError, format("aborting GC'ed build %1%") % build->id);
            printError("aborting GC'ed build %1%", build->id);
            if (!build->finishedInDB) {
                auto mc = startDbUpdate();
                pqxx::work txn(conn);
@@ -192,15 +196,19 @@ bool State::getQueuedBuilds(Connection & conn,
                if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();

                if (!propagatedFrom) {
                    for (auto & i : ex.step->drv->outputsAndOptPaths(*localStore)) {
                        if (i.second.second) {
                            auto res = txn.exec_params
                                ("select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where path = $1 and startTime != 0 and stopTime != 0 and status = 1",
                                 localStore->printStorePath(*i.second.second));
                            if (!res[0][0].is_null()) {
                                propagatedFrom = res[0][0].as<BuildID>();
                                break;
                            }
                    for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(ex.step->drvPath, &*localStore)) {
                        constexpr std::string_view common = "select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where startTime != 0 and stopTime != 0 and status = 1";
                        auto res = optOutputPath
                            ? txn.exec_params(
                                std::string { common } + " and path = $1",
                                localStore->printStorePath(*optOutputPath))
                            : txn.exec_params(
                                std::string { common } + " and drvPath = $1 and name = $2",
                                localStore->printStorePath(ex.step->drvPath),
                                outputName);
                        if (!res[0][0].is_null()) {
                            propagatedFrom = res[0][0].as<BuildID>();
                            break;
                        }
                    }
                }
@@ -236,12 +244,10 @@ bool State::getQueuedBuilds(Connection & conn,
        /* If we didn't get a step, it means the step's outputs are
           all valid. So we mark this as a finished, cached build. */
        if (!step) {
            auto drv = localStore->readDerivation(build->drvPath);
            BuildOutput res = getBuildOutputCached(conn, destStore, drv);
            BuildOutput res = getBuildOutputCached(conn, destStore, build->drvPath);

            for (auto & i : drv.outputsAndOptPaths(*localStore))
                if (i.second.second)
                    addRoot(*i.second.second);
            for (auto & i : destStore->queryDerivationOutputMap(build->drvPath, &*localStore))
                addRoot(i.second);

            {
                auto mc = startDbUpdate();
@@ -292,7 +298,7 @@ bool State::getQueuedBuilds(Connection & conn,
        try {
            createBuild(build);
        } catch (Error & e) {
            e.addTrace({}, hintfmt("while loading build %d: ", build->id));
            e.addTrace({}, HintFmt("while loading build %d: ", build->id));
            throw;
        }

@@ -302,7 +308,7 @@ bool State::getQueuedBuilds(Connection & conn,

    /* Add the new runnable build steps to ‘runnable’ and wake up
       the builder threads. */
    printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded);
    printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
    for (auto & r : newRunnable)
        makeRunnable(r);

@@ -315,7 +321,7 @@ bool State::getQueuedBuilds(Connection & conn,
        if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) {
            prom.queue_checks_early_exits.Increment();
            break;
        }
    }
}

    prom.queue_checks_finished.Increment();
@@ -358,13 +364,13 @@ void State::processQueueChange(Connection & conn)
    for (auto i = builds_->begin(); i != builds_->end(); ) {
        auto b = currentIds.find(i->first);
        if (b == currentIds.end()) {
            printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first);
            printInfo("discarding cancelled build %1%", i->first);
            i = builds_->erase(i);
            // FIXME: ideally we would interrupt active build steps here.
            continue;
        }
        if (i->second->globalPriority < b->second) {
            printMsg(lvlInfo, format("priority of build %1% increased") % i->first);
            printInfo("priority of build %1% increased", i->first);
            i->second->globalPriority = b->second;
            i->second->propagatePriorities();
        }
@@ -464,10 +470,7 @@ Step::ptr State::createStep(ref<Store> destStore,

    step->systemType = step->drv->platform;
    {
        auto i = step->drv->env.find("requiredSystemFeatures");
        StringSet features;
        if (i != step->drv->env.end())
            features = step->requiredSystemFeatures = tokenizeString<std::set<std::string>>(i->second);
        StringSet features = step->requiredSystemFeatures = step->parsedDrv->getRequiredSystemFeatures();
        if (step->preferLocalBuild)
            features.insert("local");
        if (!features.empty()) {
@@ -481,26 +484,41 @@ Step::ptr State::createStep(ref<Store> destStore,
        throw PreviousFailure{step};

    /* Are all outputs valid? */
    auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
    bool valid = true;
    DerivationOutputs missing;
    for (auto & i : step->drv->outputs)
        if (!destStore->isValidPath(*i.second.path(*localStore, step->drv->name, i.first))) {
            valid = false;
            missing.insert_or_assign(i.first, i.second);
        }
    std::map<DrvOutput, std::optional<StorePath>> missing;
    for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
        auto outputHash = outputHashes.at(outputName);
        if (maybeOutputPath && destStore->isValidPath(*maybeOutputPath))
            continue;
        valid = false;
        missing.insert({{outputHash, outputName}, maybeOutputPath});
    }
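    // queryPartialDerivationOutputMap returns every output name with its
    // store path wrapped in std::optional: known for input-addressed
    // outputs, nullopt for content-addressed outputs that haven't been
    // built yet.  That is why `missing` keys on DrvOutput and may carry
    // an unknown path.  A toy model of the shapes involved (std:: types
    // only, paths made up):
    //
    //     std::map<std::string, std::optional<std::string>> outs = {
    //         {"out", "/nix/store/aaaa-hello"},  // path known up front
    //         {"doc", std::nullopt},             // CA output, path unknown
    //     };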

    /* Try to copy the missing paths from the local store or from
       substitutes. */
    if (!missing.empty()) {

        size_t avail = 0;
        for (auto & i : missing) {
            auto path = i.second.path(*localStore, step->drv->name, i.first);
            if (/* localStore != destStore && */ localStore->isValidPath(*path))
        for (auto & [i, pathOpt] : missing) {
            // If we don't know the output path from the destination
            // store, see if the local store can tell us.
            if (/* localStore != destStore && */ !pathOpt && experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
                if (auto maybeRealisation = localStore->queryRealisation(i))
                    pathOpt = maybeRealisation->outPath;

            if (!pathOpt) {
                // No hope of getting the store object if we don't know
                // the path.
                continue;
            }
            auto & path = *pathOpt;

            if (/* localStore != destStore && */ localStore->isValidPath(path))
                avail++;
            else if (useSubstitutes) {
                SubstitutablePathInfos infos;
                localStore->querySubstitutablePathInfos({{*path, {}}}, infos);
                localStore->querySubstitutablePathInfos({{path, {}}}, infos);
                if (infos.size() == 1)
                    avail++;
            }
@@ -508,26 +526,29 @@ Step::ptr State::createStep(ref<Store> destStore,

        if (missing.size() == avail) {
            valid = true;
            for (auto & i : missing) {
                auto path = i.second.path(*localStore, step->drv->name, i.first);
            for (auto & [i, pathOpt] : missing) {
                // If we found everything, then we should know the path
                // to every missing store object now.
                assert(pathOpt);
                auto & path = *pathOpt;

                try {
                    time_t startTime = time(0);

                    if (localStore->isValidPath(*path))
                    if (localStore->isValidPath(path))
                        printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
                            localStore->printStorePath(*path),
                            localStore->printStorePath(path),
                            localStore->printStorePath(drvPath));
                    else {
                        printInfo("substituting output ‘%1%’ of ‘%2%’",
                            localStore->printStorePath(*path),
                            localStore->printStorePath(path),
                            localStore->printStorePath(drvPath));
                        localStore->ensurePath(*path);
                        localStore->ensurePath(path);
                        // FIXME: should copy directly from substituter to destStore.
                    }

                    copyClosure(*localStore, *destStore,
                        StorePathSet { *path },
                        StorePathSet { path },
                        NoRepair, CheckSigs, NoSubstitute);

                    time_t stopTime = time(0);
@@ -535,13 +556,13 @@ Step::ptr State::createStep(ref<Store> destStore,
                    {
                        auto mc = startDbUpdate();
                        pqxx::work txn(conn);
                        createSubstitutionStep(txn, startTime, stopTime, build, drvPath, "out", *path);
                        createSubstitutionStep(txn, startTime, stopTime, build, drvPath, *(step->drv), "out", path);
                        txn.commit();
                    }

                } catch (Error & e) {
                    printError("while copying/substituting output ‘%s’ of ‘%s’: %s",
                        localStore->printStorePath(*path),
                        localStore->printStorePath(path),
                        localStore->printStorePath(drvPath),
                        e.what());
                    valid = false;
@@ -561,7 +582,7 @@ Step::ptr State::createStep(ref<Store> destStore,
    printMsg(lvlDebug, "creating build step ‘%1%’", localStore->printStorePath(drvPath));

    /* Create steps for the dependencies. */
    for (auto & i : step->drv->inputDrvs) {
    for (auto & i : step->drv->inputDrvs.map) {
        auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
        if (dep) {
            auto step_(step->state.lock());
@@ -640,21 +661,23 @@ void State::processJobsetSharesChange(Connection & conn)
    }


BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::Derivation & drv)
BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::StorePath & drvPath)
{
    auto derivationOutputs = destStore->queryDerivationOutputMap(drvPath, &*localStore);

    {
        pqxx::work txn(conn);

        for (auto & [name, output] : drv.outputsAndOptPaths(*localStore)) {
        for (auto & [name, output] : derivationOutputs) {
            auto r = txn.exec_params
                ("select id, buildStatus, releaseName, closureSize, size from Builds b "
                 "join BuildOutputs o on b.id = o.build "
                 "where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
                 localStore->printStorePath(*output.second));
                 localStore->printStorePath(output));
            if (r.empty()) continue;
            BuildID id = r[0][0].as<BuildID>();

            printMsg(lvlInfo, format("reusing build %d") % id);
            printInfo("reusing build %d", id);

            BuildOutput res;
            res.failed = r[0][1].as<int>() == bsFailedWithOutput;
@@ -677,7 +700,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
            product.fileSize = row[2].as<off_t>();
        }
        if (!row[3].is_null())
            product.sha256hash = Hash::parseAny(row[3].as<std::string>(), htSHA256);
            product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashAlgorithm::SHA256);
        if (!row[4].is_null())
            product.path = row[4].as<std::string>();
        product.name = row[5].as<std::string>();
@@ -704,5 +727,5 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
    }

    NarMemberDatas narMembers;
    return getBuildOutput(destStore, narMembers, drv);
    return getBuildOutput(destStore, narMembers, derivationOutputs);
}

@@ -21,6 +21,9 @@
#include "store-api.hh"
#include "sync.hh"
#include "nar-extractor.hh"
#include "serve-protocol.hh"
#include "serve-protocol-impl.hh"
#include "machines.hh"


typedef unsigned int BuildID;
@@ -78,6 +81,8 @@ struct RemoteResult
    {
        return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
    }

    void updateWithBuildResult(const nix::BuildResult &);
};


@@ -231,17 +236,13 @@ void getDependents(Step::ptr step, std::set<Build::ptr> & builds, std::set<Step:
void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);


struct Machine
struct Machine : nix::Machine
{
    typedef std::shared_ptr<Machine> ptr;

    bool enabled{true};

    std::string sshName, sshKey;
    std::set<std::string> systemTypes, supportedFeatures, mandatoryFeatures;
    unsigned int maxJobs = 1;
    float speedFactor = 1.0;
    std::string sshPublicHostKey;
    /* TODO Get rid of: `nix::Machine::storeUri` is normalized in a way
       we are not yet used to, but once we are, we don't need this. */
    std::string sshName;

    struct State {
        typedef std::shared_ptr<State> ptr;
@@ -297,6 +298,12 @@ struct Machine
        std::regex r("^(ssh://|ssh-ng://)?localhost$");
        return std::regex_search(sshName, r);
    }

    // A connection to a machine
    struct Connection : nix::ServeProto::BasicClientConnection {
        // Backpointer to the machine
        ptr machine;
    };
};
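// For reference, the localhost regex shown above accepts the bare and
// ssh-prefixed spellings only (illustration, derived from the pattern):
//
//     ^(ssh://|ssh-ng://)?localhost$
//     "localhost"          -> match
//     "ssh://localhost"    -> match
//     "ssh-ng://localhost" -> match
//     "root@localhost"     -> no match (other prefixes rejected)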


@@ -430,7 +437,7 @@ private:

    /* How often the build steps of a jobset should be repeated in
       order to detect non-determinism. */
    std::map<std::pair<std::string, std::string>, unsigned int> jobsetRepeats;
    std::map<std::pair<std::string, std::string>, size_t> jobsetRepeats;

    bool uploadLogsToBinaryCache;

@@ -485,13 +492,13 @@ private:
        const std::string & machine);

    int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
        Build::ptr build, const nix::StorePath & drvPath, const std::string & outputName, const nix::StorePath & storePath);
        Build::ptr build, const nix::StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const nix::StorePath & storePath);

    void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);

    void queueMonitor();

    void queueMonitorLoop();
    void queueMonitorLoop(Connection & conn);

    /* Check the queue for new builds. */
    bool getQueuedBuilds(Connection & conn,
@@ -501,7 +508,7 @@ private:
    void processQueueChange(Connection & conn);

    BuildOutput getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore,
        const nix::Derivation & drv);
        const nix::StorePath & drvPath);

    Step::ptr createStep(nix::ref<nix::Store> store,
        Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
@@ -543,8 +550,7 @@ private:

    void buildRemote(nix::ref<nix::Store> destStore,
        Machine::ptr machine, Step::ptr step,
        unsigned int maxSilentTime, unsigned int buildTimeout,
        unsigned int repeats,
        const nix::ServeProto::BuildOptions & buildOptions,
        RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
        std::function<void(StepState)> updateStep,
        NarMemberDatas & narMembers);

@@ -4,7 +4,6 @@ use strict;
use warnings;
use base 'Hydra::Base::Controller::REST';
use List::SomeUtils qw(any);
use Nix::Store;
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;

@@ -30,7 +29,7 @@ sub getChannelData {
    my $outputs = {};
    foreach my $output (@outputs) {
        my $outPath = $output->get_column("outpath");
        next if $checkValidity && !isValidPath($outPath);
        next if $checkValidity && !$MACHINE_LOCAL_STORE->isValidPath($outPath);
        $outputs->{$output->get_column("outname")} = $outPath;
        push @storePaths, $outPath;
        # Put the system type in the manifest (for top-level

@@ -285,6 +285,23 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
    $c->response->body("");
}

sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) {
    my ($self, $c) = @_;

    $c->{stash}->{json}->{jobsetsTriggered} = [];

    my $in = $c->request->{data};
    my $url = $in->{repository}->{clone_url} or die;
    $url =~ s/.git$//;
    print STDERR "got push from Gitea repository $url\n";

    triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search(
        { 'project.enabled' => 1, 'me.enabled' => 1 },
        { join => 'project'
        , where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ]
        });
    $c->response->body("");
}


1;

@@ -10,11 +10,10 @@ use File::Basename;
use File::LibMagic;
use File::stat;
use Data::Dump qw(dump);
use Nix::Store;
use Nix::Config;
use List::SomeUtils qw(all);
use Encode;
use JSON::PP;
use WWW::Form::UrlEncoded::PP qw();

use feature 'state';

@@ -78,14 +77,16 @@ sub build_GET {

    $c->stash->{template} = 'build.tt';
    $c->stash->{isLocalStore} = isLocalStore();
    # XXX: If the derivation is content-addressed then this will always return
    # false because `$_->path` will be empty
    $c->stash->{available} =
        $c->stash->{isLocalStore}
        ? all { isValidPath($_->path) } $build->buildoutputs->all
        ? all { $_->path && $MACHINE_LOCAL_STORE->isValidPath($_->path) } $build->buildoutputs->all
        : 1;
    $c->stash->{drvAvailable} = isValidPath $build->drvpath;
    $c->stash->{drvAvailable} = $MACHINE_LOCAL_STORE->isValidPath($build->drvpath);

    if ($build->finished && $build->iscachedbuild) {
        my $path = ($build->buildoutputs)[0]->path or die;
        my $path = ($build->buildoutputs)[0]->path or undef;
        my $cachedBuildStep = findBuildStepByOutPath($self, $c, $path);
        if (defined $cachedBuildStep) {
            $c->stash->{cachedBuild} = $cachedBuildStep->build;
@@ -139,7 +140,7 @@ sub view_nixlog : Chained('buildChain') PathPart('nixlog') {
    $c->stash->{step} = $step;

    my $drvPath = $step->drvpath;
    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
    showLog($c, $mode, $log_uri);
}

@@ -148,7 +149,7 @@ sub view_log : Chained('buildChain') PathPart('log') {
    my ($self, $c, $mode) = @_;

    my $drvPath = $c->stash->{build}->drvpath;
    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
    showLog($c, $mode, $log_uri);
}

@@ -233,14 +234,25 @@ sub serveFile {
    }

    elsif ($ls->{type} eq "regular") {
        # Have the hosted data considered its own origin to avoid being a giant
        # XSS hole.
        $c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');

        $c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
                                                      "store", "cat", "--store", getStoreUri(), "$path"]) };

        # Detect MIME type.
        state $magic = File::LibMagic->new(follow_symlinks => 1);
        my $info = $magic->info_from_filename($path);
        my $type = $info->{mime_with_encoding};
        my $type = "text/plain";
        if ($path =~ /.*\.(\S{1,})$/xms) {
            my $ext = $1;
            my $mimeTypes = MIME::Types->new(only_complete => 1);
            my $t = $mimeTypes->mimeTypeOf($ext);
            $type = ref $t ? $t->type : $t if $t;
        } else {
            state $magic = File::LibMagic->new(follow_symlinks => 1);
            my $info = $magic->info_from_filename($path);
            $type = $info->{mime_with_encoding};
        }
        $c->response->content_type($type);
        $c->forward('Hydra::View::Plain');
    }
@@ -298,7 +310,7 @@ sub output : Chained('buildChain') PathPart Args(1) {
    error($c, "This build is not finished yet.") unless $build->finished;
    my $output = $build->buildoutputs->find({name => $outputName});
    notFound($c, "This build has no output named ‘$outputName’") unless defined $output;
    gone($c, "Output is no longer available.") unless isValidPath $output->path;
    gone($c, "Output is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($output->path);

    $c->response->header('Content-Disposition', "attachment; filename=\"build-${\$build->id}-${\$outputName}.nar.bz2\"");
    $c->stash->{current_view} = 'NixNAR';
@@ -415,7 +427,7 @@ sub getDependencyGraph {
    };
    $$done{$path} = $node;
    my @refs;
    foreach my $ref (queryReferences($path)) {
    foreach my $ref ($MACHINE_LOCAL_STORE->queryReferences($path)) {
        next if $ref eq $path;
        next unless $runtime || $ref =~ /\.drv$/;
        getDependencyGraph($self, $c, $runtime, $done, $ref);
@@ -423,7 +435,7 @@ sub getDependencyGraph {
    }
    # Show in reverse topological order to flatten the graph.
    # Should probably do a proper BFS.
    my @sorted = reverse topoSortPaths(@refs);
    my @sorted = reverse $MACHINE_LOCAL_STORE->topoSortPaths(@refs);
    $node->{refs} = [map { $$done{$_} } @sorted];
}

@@ -436,7 +448,7 @@ sub build_deps : Chained('buildChain') PathPart('build-deps') {
    my $build = $c->stash->{build};
    my $drvPath = $build->drvpath;

    error($c, "Derivation no longer available.") unless isValidPath $drvPath;
    error($c, "Derivation no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($drvPath);

    $c->stash->{buildTimeGraph} = getDependencyGraph($self, $c, 0, {}, $drvPath);

@@ -451,7 +463,7 @@ sub runtime_deps : Chained('buildChain') PathPart('runtime-deps') {

    requireLocalStore($c);

    error($c, "Build outputs no longer available.") unless all { isValidPath($_) } @outPaths;
    error($c, "Build outputs no longer available.") unless all { $MACHINE_LOCAL_STORE->isValidPath($_) } @outPaths;

    my $done = {};
    $c->stash->{runtimeGraph} = [ map { getDependencyGraph($self, $c, 1, $done, $_) } @outPaths ];
@@ -471,7 +483,7 @@ sub nix : Chained('buildChain') PathPart('nix') CaptureArgs(0) {
    if (isLocalStore) {
        foreach my $out ($build->buildoutputs) {
            notFound($c, "Path " . $out->path . " is no longer available.")
                unless isValidPath($out->path);
                unless $MACHINE_LOCAL_STORE->isValidPath($out->path);
        }
    }
|
||||
|
||||
|
||||
@@ -16,8 +16,11 @@ use List::Util qw[min max];
 use List::SomeUtils qw{any};
 use Net::Prometheus;
 use Types::Standard qw/StrMatch/;
+use WWW::Form::UrlEncoded::PP qw();

 use constant NARINFO_REGEX => qr{^([a-z0-9]{32})\.narinfo$};
+# e.g.: https://hydra.example.com/realisations/sha256:a62128132508a3a32eef651d6467695944763602f226ac630543e947d9feb140!out.doi
+use constant REALISATIONS_REGEX => qr{^(sha256:[a-z0-9]{64}![a-z]+)\.doi$};

 # Put this controller at top-level.
 __PACKAGE__->config->{namespace} = '';
@@ -32,6 +35,7 @@ sub noLoginNeeded {

     return $whitelisted ||
         $c->request->path eq "api/push-github" ||
+        $c->request->path eq "api/push-gitea" ||
         $c->request->path eq "google-login" ||
         $c->request->path eq "github-redirect" ||
         $c->request->path eq "github-login" ||
@@ -77,7 +81,7 @@ sub begin :Private {
     $_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins};

     # XSRF protection: require POST requests to have the same origin.
-    if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") {
+    if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") {
         my $referer = $c->req->header('Referer');
         $referer //= $c->req->header('Origin');
         my $base = $c->req->base;
@@ -355,6 +359,33 @@ sub nix_cache_info :Path('nix-cache-info') :Args(0) {
 }


+sub realisations :Path('realisations') :Args(StrMatch[REALISATIONS_REGEX]) {
+    my ($self, $c, $realisation) = @_;
+
+    if (!isLocalStore) {
+        notFound($c, "There is no binary cache here.");
+    }
+
+    else {
+        my ($rawDrvOutput) = $realisation =~ REALISATIONS_REGEX;
+        my $rawRealisation = $MACHINE_LOCAL_STORE->queryRawRealisation($rawDrvOutput);
+
+        if (!$rawRealisation) {
+            $c->response->status(404);
+            $c->response->content_type('text/plain');
+            $c->stash->{plain}->{data} = "does not exist\n";
+            $c->forward('Hydra::View::Plain');
+            setCacheHeaders($c, 60 * 60);
+            return;
+        }
+
+        $c->response->content_type('text/plain');
+        $c->stash->{plain}->{data} = $rawRealisation;
+        $c->forward('Hydra::View::Plain');
+    }
+}
+
+
 sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
     my ($self, $c, $narinfo) = @_;
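The new realisations endpoint serves raw realisation data for content-addressed derivation outputs. A small sketch of how a `.doi` request name maps onto the id handed to queryRawRealisation (the all-zero hash below is a made-up placeholder):

    use constant REALISATIONS_REGEX => qr{^(sha256:[a-z0-9]{64}![a-z]+)\.doi$};

    # ".../realisations/sha256:<drv-hash>!out.doi" captures
    # "sha256:<drv-hash>!out", which is what the store is queried with.
    my $name = "sha256:" . ("0" x 64) . "!out.doi";
    if (my ($rawDrvOutput) = $name =~ REALISATIONS_REGEX) {
        print "query id: $rawDrvOutput\n";
    }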
@@ -366,7 +397,7 @@ sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
     my ($hash) = $narinfo =~ NARINFO_REGEX;

     die("Hash length was not 32") if length($hash) != 32;
-    my $path = queryPathFromHashPart($hash);
+    my $path = $MACHINE_LOCAL_STORE->queryPathFromHashPart($hash);

     if (!$path) {
         $c->response->status(404);
@@ -524,7 +555,7 @@ sub log :Local :Args(1) {
     my $logPrefix = $c->config->{log_prefix};

     if (defined $logPrefix) {
-        $c->res->redirect($logPrefix . "log/" . basename($drvPath));
+        $c->res->redirect($logPrefix . "log/" . WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath)));
     } else {
         notFound($c, "The build log of $drvPath is not available.");
     }

@@ -463,7 +463,7 @@ sub my_jobs_tab :Chained('dashboard_base') :PathPart('my-jobs-tab') :Args(0) {
         , "jobset.enabled" => 1
         },
         { order_by => ["project", "jobset", "job"]
-        , join => ["project", "jobset"]
+        , join => {"jobset" => "project"}
         })];
 }

@@ -40,8 +40,11 @@ our @EXPORT = qw(
     registerRoot
     restartBuilds
     run
+    $MACHINE_LOCAL_STORE
 );

+our $MACHINE_LOCAL_STORE = Nix::Store->new();
+

 sub getHydraHome {
     my $dir = $ENV{"HYDRA_HOME"} or die "The HYDRA_HOME directory does not exist!\n";
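This hunk is the heart of the branch: a shared Nix::Store handle, exported as $MACHINE_LOCAL_STORE, replaces the bare functions (isValidPath, queryPathInfo, addTempRoot, ...) that the rest of this diff converts call site by call site. A sketch of the intended call style (the store path is a placeholder; assumes the Nix Perl bindings are built):

    use Hydra::Helper::Nix;   # exports $MACHINE_LOCAL_STORE

    my $path = "/nix/store/placeholder-example";   # placeholder, not a real path
    # Old style: isValidPath($path). New style: a method on the shared handle.
    if ($MACHINE_LOCAL_STORE->isValidPath($path)) {
        my ($deriver) = $MACHINE_LOCAL_STORE->queryPathInfo($path, 0);
        print "deriver: ", $deriver // "(none)", "\n";
    }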
@@ -187,6 +190,10 @@ sub findLog {

     return undef if scalar @outPaths == 0;

+    # Filter out any NULLs. Content-addressed derivations
+    # that haven't built yet or failed to build may have a NULL outPath.
+    @outPaths = grep {defined} @outPaths;
+
     my @steps = $c->model('DB::BuildSteps')->search(
         { path => { -in => [@outPaths] } },
         { select => ["drvpath"]
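The added grep matters because, with content-addressed derivations, an output that has not been built yet has a NULL path (see the schema changes below), and an undef would otherwise leak into the generated SQL IN-list. A tiny illustration:

    # undef entries come from CA builds whose output path is not known yet.
    my @outPaths = ("/nix/store/placeholder-a", undef, "/nix/store/placeholder-b");
    @outPaths = grep {defined} @outPaths;
    print scalar(@outPaths), " usable paths\n";   # prints "2 usable paths"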
@@ -494,7 +501,7 @@ sub restartBuilds {
     $builds = $builds->search({ finished => 1 });

     foreach my $build ($builds->search({}, { columns => ["drvpath"] })) {
-        next if !isValidPath($build->drvpath);
+        next if !$MACHINE_LOCAL_STORE->isValidPath($build->drvpath);
         registerRoot $build->drvpath;
     }

@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
 use File::Path;
 use Hydra::Helper::Exec;
 use Hydra::Helper::Nix;
-use Nix::Store;

 sub supportedInputTypes {
     my ($self, $inputTypes) = @_;
@@ -38,9 +37,9 @@ sub fetchInput {
     (my $cachedInput) = $self->{db}->resultset('CachedBazaarInputs')->search(
         {uri => $uri, revision => $revision});

-    addTempRoot($cachedInput->storepath) if defined $cachedInput;
+    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
     } else {
@@ -58,7 +57,7 @@ sub fetchInput {
     ($sha256, $storePath) = split ' ', $stdout;

     # FIXME: time window between nix-prefetch-bzr and addTempRoot.
-    addTempRoot($storePath);
+    $MACHINE_LOCAL_STORE->addTempRoot($storePath);

     $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedBazaarInputs')->create(

@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
 use File::Path;
 use Hydra::Helper::Exec;
 use Hydra::Helper::Nix;
-use Nix::Store;

 sub supportedInputTypes {
     my ($self, $inputTypes) = @_;
@@ -58,7 +57,7 @@ sub fetchInput {
         {uri => $uri, revision => $revision},
         {rows => 1});

-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
         $revision = $cachedInput->revision;
@@ -75,8 +74,8 @@ sub fetchInput {
     die "darcs changes --count failed" if $? != 0;

     system "rm", "-rf", "$tmpDir/export/_darcs";
-    $storePath = addToStore("$tmpDir/export", 1, "sha256");
-    $sha256 = queryPathHash($storePath);
+    $storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/export", 1, "sha256");
+    $sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath);
     $sha256 =~ s/sha256://;

     $self->{db}->txn_do(sub {

@@ -186,9 +186,9 @@ sub fetchInput {
         {uri => $uri, branch => $branch, revision => $revision, isdeepclone => defined($deepClone) ? 1 : 0},
         {rows => 1});

-    addTempRoot($cachedInput->storepath) if defined $cachedInput;
+    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
         $revision = $cachedInput->revision;
@@ -217,7 +217,7 @@ sub fetchInput {
     ($sha256, $storePath) = split ' ', grab(cmd => ["nix-prefetch-git", $clonePath, $revision], chomp => 1);

     # FIXME: time window between nix-prefetch-git and addTempRoot.
-    addTempRoot($storePath);
+    $MACHINE_LOCAL_STORE->addTempRoot($storePath);

     $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedGitInputs')->update_or_create(

@@ -88,10 +88,6 @@ sub buildQueued {
     common(@_, [], 0);
 }

-sub buildStarted {
-    common(@_, [], 1);
-}
-
 sub buildFinished {
     common(@_, 2);
 }

@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
 use File::Path;
 use Hydra::Helper::Nix;
 use Hydra::Helper::Exec;
-use Nix::Store;
 use Fcntl qw(:flock);

 sub supportedInputTypes {
@@ -68,9 +67,9 @@ sub fetchInput {
     (my $cachedInput) = $self->{db}->resultset('CachedHgInputs')->search(
         {uri => $uri, branch => $branch, revision => $revision});

-    addTempRoot($cachedInput->storepath) if defined $cachedInput;
+    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
     } else {
@@ -85,7 +84,7 @@ sub fetchInput {
     ($sha256, $storePath) = split ' ', $stdout;

     # FIXME: time window between nix-prefetch-hg and addTempRoot.
-    addTempRoot($storePath);
+    $MACHINE_LOCAL_STORE->addTempRoot($storePath);

     $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedHgInputs')->update_or_create(

@@ -5,7 +5,6 @@ use warnings;
 use parent 'Hydra::Plugin';
 use POSIX qw(strftime);
 use Hydra::Helper::Nix;
-use Nix::Store;

 sub supportedInputTypes {
     my ($self, $inputTypes) = @_;
@@ -30,7 +29,7 @@ sub fetchInput {
         {srcpath => $uri, lastseen => {">", $timestamp - $timeout}},
         {rows => 1, order_by => "lastseen DESC"});

-    if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
+    if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
         $sha256 = $cachedInput->sha256hash;
         $timestamp = $cachedInput->timestamp;
@@ -46,7 +45,7 @@ sub fetchInput {
     }
     chomp $storePath;

-    $sha256 = (queryPathInfo($storePath, 0))[1] or die;
+    $sha256 = ($MACHINE_LOCAL_STORE->queryPathInfo($storePath, 0))[1] or die;

     ($cachedInput) = $self->{db}->resultset('CachedPathInputs')->search(
         {srcpath => $uri, sha256hash => $sha256});

@@ -7,7 +7,6 @@ use Digest::SHA qw(sha256_hex);
 use Hydra::Helper::Exec;
 use Hydra::Helper::Nix;
 use IPC::Run;
-use Nix::Store;

 sub supportedInputTypes {
     my ($self, $inputTypes) = @_;
@@ -45,7 +44,7 @@ sub fetchInput {
     (my $cachedInput) = $self->{db}->resultset('CachedSubversionInputs')->search(
         {uri => $uri, revision => $revision});

-    addTempRoot($cachedInput->storepath) if defined $cachedInput;
+    $MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;

     if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
         $storePath = $cachedInput->storepath;
@@ -62,16 +61,16 @@ sub fetchInput {
     die "error checking out Subversion repo at `$uri':\n$stderr" if $res;

     if ($type eq "svn-checkout") {
-        $storePath = addToStore($wcPath, 1, "sha256");
+        $storePath = $MACHINE_LOCAL_STORE->addToStore($wcPath, 1, "sha256");
     } else {
         # Hm, if the Nix Perl bindings supported filters in
         # addToStore(), then we wouldn't need to make a copy here.
         my $tmpDir = File::Temp->newdir("hydra-svn-export.XXXXXX", CLEANUP => 1, TMPDIR => 1) or die;
         (system "svn", "export", $wcPath, "$tmpDir/source", "--quiet") == 0 or die "svn export failed";
-        $storePath = addToStore("$tmpDir/source", 1, "sha256");
+        $storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/source", 1, "sha256");
     }

-    $sha256 = queryPathHash($storePath); $sha256 =~ s/sha256://;
+    $sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath); $sha256 =~ s/sha256://;

     $self->{db}->txn_do(sub {
         $self->{db}->resultset('CachedSubversionInputs')->update_or_create(

@@ -49,7 +49,7 @@ __PACKAGE__->table("buildoutputs");
 =head2 path

   data_type: 'text'
-  is_nullable: 0
+  is_nullable: 1

 =cut

@@ -59,7 +59,7 @@ __PACKAGE__->add_columns(
   "name",
   { data_type => "text", is_nullable => 0 },
   "path",
-  { data_type => "text", is_nullable => 0 },
+  { data_type => "text", is_nullable => 1 },
 );

 =head1 PRIMARY KEY
@@ -94,8 +94,8 @@ __PACKAGE__->belongs_to(
 );


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gU+kZ6A0ISKpaXGRGve8mg
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Jsabm3YTcI7YvCuNdKP5Ng

 my %hint = (
     columns => [

@@ -55,7 +55,7 @@ __PACKAGE__->table("buildstepoutputs");
 =head2 path

   data_type: 'text'
-  is_nullable: 0
+  is_nullable: 1

 =cut

@@ -67,7 +67,7 @@ __PACKAGE__->add_columns(
   "name",
   { data_type => "text", is_nullable => 0 },
   "path",
-  { data_type => "text", is_nullable => 0 },
+  { data_type => "text", is_nullable => 1 },
 );

 =head1 PRIMARY KEY
@@ -119,8 +119,8 @@ __PACKAGE__->belongs_to(
 );


-# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
-# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gxp8rOjpRVen4YbIjomHTw
+# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
+# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Bad70CRTt7zb2GGuRoQ++Q


 # You can replace this text with custom code or comments, and it will be preserved on regeneration

@@ -216,7 +216,7 @@ sub json_hint {

 sub _authenticator() {
     my $authenticator = Crypt::Passphrase->new(
-        encoder => 'Argon2',
+        encoder => { module => 'Argon2', output_size => 16 },
         validators => [
             (sub {
                 my ($password, $hash) = @_;
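Passing a hashref instead of the bare module name lets the Argon2 encoder be configured; pinning output_size to 16 bytes keeps new hashes the same shape as those produced by older Crypt::Passphrase::Argon2 defaults (that rationale is an inference, not stated in the diff). A sketch of the configured authenticator in isolation:

    use Crypt::Passphrase;

    my $authenticator = Crypt::Passphrase->new(
        encoder => { module => 'Argon2', output_size => 16 },
    );

    my $hash = $authenticator->hash_password("hunter2");
    print $authenticator->verify_password("hunter2", $hash) ? "ok\n" : "no\n";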
@@ -6,8 +6,7 @@ use File::Basename;
 use Hydra::Helper::CatalystUtils;
 use MIME::Base64;
 use Nix::Manifest;
-use Nix::Store;
 use Nix::Utils;
+use Hydra::Helper::Nix;
 use base qw/Catalyst::View/;

 sub process {
@@ -17,7 +16,7 @@ sub process {

     $c->response->content_type('text/x-nix-narinfo'); # !!! check MIME type

-    my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath, 1);
+    my ($deriver, $narHash, $time, $narSize, $refs) = $MACHINE_LOCAL_STORE->queryPathInfo($storePath, 1);

     my $info;
     $info .= "StorePath: $storePath\n";
@@ -28,8 +27,8 @@ sub process {
     $info .= "References: " . join(" ", map { basename $_ } @{$refs}) . "\n";
     if (defined $deriver) {
         $info .= "Deriver: " . basename $deriver . "\n";
-        if (isValidPath($deriver)) {
-            my $drv = derivationFromPath($deriver);
+        if ($MACHINE_LOCAL_STORE->isValidPath($deriver)) {
+            my $drv = $MACHINE_LOCAL_STORE->derivationFromPath($deriver);
             $info .= "System: $drv->{platform}\n";
         }
     }

@@ -2,6 +2,7 @@

 #include <pqxx/pqxx>

+#include "environment-variables.hh"
 #include "util.hh"


@@ -2,6 +2,7 @@

 #include <map>

+#include "file-system.hh"
 #include "util.hh"

 struct HydraConfig

@@ -33,7 +33,7 @@
 <div id="hydra-signin" class="modal hide fade" tabindex="-1" role="dialog" aria-hidden="true">
   <div class="modal-dialog" role="document">
     <div class="modal-content">
-      <form>
+      <form id="signin-form">
         <div class="modal-body">
           <div class="form-group">
             <label for="username" class="col-form-label">User name</label>
@@ -45,7 +45,7 @@
           </div>
         </div>
         <div class="modal-footer">
-          <button id="do-signin" type="button" class="btn btn-primary">Sign in</button>
+          <button type="submit" class="btn btn-primary">Sign in</button>
           <button type="button" class="btn btn-secondary" data-dismiss="modal">Cancel</button>
         </div>
       </form>
@@ -57,10 +57,11 @@

       function finishSignOut() { }

-      $("#do-signin").click(function() {
+      $("#signin-form").submit(function(e) {
+        e.preventDefault();
         requestJSON({
           url: "[% c.uri_for('/login') %]",
-          data: $(this).parents("form").serialize(),
+          data: $(this).serialize(),
           type: 'POST',
           success: function(data) {
             window.location.reload();
@@ -82,7 +83,7 @@
       function onGoogleSignIn(googleUser) {
         requestJSON({
           url: "[% c.uri_for('/google-login') %]",
-          data: "id_token=" + googleUser.getAuthResponse().id_token,
+          data: "id_token=" + googleUser.credential,
           type: 'POST',
           success: function(data) {
             window.location.reload();
@@ -91,9 +92,6 @@
         return false;
       };

-      $("#google-signin").click(function() {
-        $(".g-signin2:first-child > div").click();
-      });
     </script>
 [% END %]

@@ -374,7 +374,7 @@ BLOCK renderInputDiff; %]
       [% ELSIF bi1.uri == bi2.uri && bi1.revision != bi2.revision %]
         [% IF bi1.type == "git" %]
           <tr><td>
-            <b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 6) _ ' to ' _ bi2.revision.substr(0, 6)) %]</tt>
+            <b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 8) _ ' to ' _ bi2.revision.substr(0, 8)) %]</tt>
           </td></tr>
         [% ELSE %]
           <tr><td>

@@ -133,8 +133,10 @@
     [% ELSE %]
       [% WRAPPER makeSubMenu title="Sign in" id="sign-in-menu" align="right" %]
         [% IF c.config.enable_google_login %]
-          <div style="display: none" class="g-signin2" data-onsuccess="onGoogleSignIn" data-theme="dark"></div>
-          <a class="dropdown-item" href="#" id="google-signin">Sign in with Google</a>
+          <script src="https://accounts.google.com/gsi/client" async defer></script>
+          <div id="g_id_onload" data-client_id="[% c.config.google_client_id %]" data-auto_prompt="false" data-callback="onGoogleSignIn">
+          </div>
+          <div class="g_id_signin" data-type="standard"></div>
           <div class="dropdown-divider"></div>
         [% END %]
         [% IF c.config.github_client_id %]

@@ -85,14 +85,14 @@ sub attrsToSQL {
 # Fetch a store path from 'eval_substituter' if not already present.
 sub getPath {
     my ($path) = @_;
-    return 1 if isValidPath($path);
+    return 1 if $MACHINE_LOCAL_STORE->isValidPath($path);

     my $substituter = $config->{eval_substituter};

     system("nix", "--experimental-features", "nix-command", "copy", "--from", $substituter, "--", $path)
         if defined $substituter;

-    return isValidPath($path);
+    return $MACHINE_LOCAL_STORE->isValidPath($path);
 }

@@ -143,7 +143,7 @@ sub fetchInputBuild {
         , version => $version
         , outputName => $mainOutput->name
         };
-        if (isValidPath($prevBuild->drvpath)) {
+        if ($MACHINE_LOCAL_STORE->isValidPath($prevBuild->drvpath)) {
             $result->{drvPath} = $prevBuild->drvpath;
         }

@@ -233,7 +233,7 @@ sub fetchInputEval {
         my $out = $build->buildoutputs->find({ name => "out" });
         next unless defined $out;
         # FIXME: Should we fail if the path is not valid?
-        next unless isValidPath($out->path);
+        next unless $MACHINE_LOCAL_STORE->isValidPath($out->path);
         $jobs->{$build->get_column('job')} = $out->path;
     }

@@ -438,13 +438,17 @@ sub checkBuild {
     # new build to be scheduled if the meta.maintainers field is
     # changed?
     if (defined $prevEval) {
+        my $pathOrDrvConstraint = defined $firstOutputPath
+            ? { path => $firstOutputPath }
+            : { drvPath => $drvPath };
+
         my ($prevBuild) = $prevEval->builds->search(
             # The "project" and "jobset" constraints are
             # semantically unnecessary (because they're implied by
             # the eval), but they give a factor 1000 speedup on
             # the Nixpkgs jobset with PostgreSQL.
             { jobset_id => $jobset->get_column('id'), job => $jobName,
-              name => $firstOutputName, path => $firstOutputPath },
+              name => $firstOutputName, %$pathOrDrvConstraint },
             { rows => 1, columns => ['id', 'finished'], join => ['buildoutputs'] });
         if (defined $prevBuild) {
             #print STDERR "  already scheduled/built as build ", $prevBuild->id, "\n";
@@ -5,7 +5,6 @@ use warnings;
 use File::Path;
 use File::stat;
 use File::Basename;
-use Nix::Store;
 use Hydra::Config;
 use Hydra::Schema;
 use Hydra::Helper::Nix;
@@ -47,7 +46,7 @@ sub keepBuild {
         $build->finished && ($build->buildstatus == 0 || $build->buildstatus == 6))
     {
         foreach my $path (split / /, $build->get_column('outpaths')) {
-            if (isValidPath($path)) {
+            if ($MACHINE_LOCAL_STORE->isValidPath($path)) {
                 addRoot $path;
             } else {
                 print STDERR "  warning: output ", $path, " has disappeared\n" if $build->finished;
@@ -55,7 +54,7 @@ sub keepBuild {
         }
     }
     if (!$build->finished || ($keepFailedDrvs && $build->buildstatus != 0)) {
-        if (isValidPath($build->drvpath)) {
+        if ($MACHINE_LOCAL_STORE->isValidPath($build->drvpath)) {
            addRoot $build->drvpath;
        } else {
            print STDERR "  warning: derivation ", $build->drvpath, " has disappeared\n";

@@ -247,7 +247,7 @@ create trigger BuildBumped after update on Builds for each row
 create table BuildOutputs (
     build         integer not null,
     name          text not null,
-    path          text not null,
+    path          text,
     primary key   (build, name),
     foreign key   (build) references Builds(id) on delete cascade
 );
@@ -303,7 +303,7 @@ create table BuildStepOutputs (
     build         integer not null,
     stepnr        integer not null,
     name          text not null,
-    path          text not null,
+    path          text,
     primary key   (build, stepnr, name),
     foreign key   (build) references Builds(id) on delete cascade,
     foreign key   (build, stepnr) references BuildSteps(build, stepnr) on delete cascade

src/sql/upgrade-83.sql (new file, 3 lines)
@@ -0,0 +1,3 @@
+-- This index was introduced in a migration but was never recorded in
+-- hydra.sql (the source of truth), which is why `if exists` is required.
+drop index if exists IndexBuildOutputsOnPath;
src/sql/upgrade-84.sql (new file, 4 lines)
@@ -0,0 +1,4 @@
+-- CA derivations do not have statically known output paths. The values
+-- are only filled in after the build runs.
+ALTER TABLE BuildStepOutputs ALTER COLUMN path DROP NOT NULL;
+ALTER TABLE BuildOutputs ALTER COLUMN path DROP NOT NULL;
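These two migrations are the runtime counterpart of the hydra.sql changes above: path becomes nullable on both tables, and the never-formally-recorded index on BuildOutputs.path is dropped first. A hypothetical sketch of applying them by hand with DBI (Hydra normally applies upgrade-NN.sql files itself; the connection string is a placeholder):

    use DBI;

    my $dbh = DBI->connect("dbi:Pg:dbname=hydra", "", "", { RaiseError => 1 });
    $dbh->do("drop index if exists IndexBuildOutputsOnPath");
    $dbh->do("ALTER TABLE BuildStepOutputs ALTER COLUMN path DROP NOT NULL");
    $dbh->do("ALTER TABLE BuildOutputs ALTER COLUMN path DROP NOT NULL");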
@@ -54,13 +54,14 @@ subtest "/job/PROJECT/JOBSET/JOB/shield" => sub {

 subtest "/job/PROJECT/JOBSET/JOB/prometheus" => sub {
     my $response = request(GET '/job/' . $project->name . '/' . $jobset->name . '/' . $build->job . '/prometheus');
-    ok($response->is_success, "The page showing the job's prometheus data returns 200.");
-    my $metrics = $response->content;
-
-    ok($metrics =~ m/hydra_job_failed\{.*\} 0/);
-    ok($metrics =~ m/hydra_job_completion_time\{.*\} [\d]+/);
-    ok($metrics =~ m/hydra_build_closure_size\{.*\} 96/);
-    ok($metrics =~ m/hydra_build_output_size\{.*\} 96/);
+    ok($response->is_success, "The page showing the job's prometheus data returns 200.");
+
+    my $metrics = $response->content;
+    like($metrics, qr/hydra_job_failed\{.*\} 0/);
+    like($metrics, qr/hydra_job_completion_time\{.*\} [\d]+/);
+    like($metrics, qr/hydra_build_closure_size\{.*\} 96/);
+    like($metrics, qr/hydra_build_output_size\{.*\} 96/);
 };

 done_testing;
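Swapping ok($str =~ m/.../) for like() is more than style: on failure, Test2::V0's like prints both the received string and the pattern, while the ok form only reports a false boolean. A minimal sketch (the metrics line is sample input):

    use Test2::V0;

    my $metrics = "hydra_job_failed{project=\"demo\"} 0\n";   # sample input
    like($metrics, qr/hydra_job_failed\{.*\} 0/, "failure metric is 0");

    done_testing;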
@@ -186,7 +186,7 @@ subtest 'Update jobset "job" to have an invalid input type' => sub {
         })
     );
     ok(!$jobsetupdate->is_success);
-    ok($jobsetupdate->content =~ m/Invalid input type.*valid types:/);
+    like($jobsetupdate->content, qr/Invalid input type.*valid types:/);
 };

t/Hydra/Controller/User/dashboard.t (new file, 30 lines)
@@ -0,0 +1,30 @@
+use strict;
+use warnings;
+use Setup;
+my $ctx = test_context();
+use HTTP::Request::Common;
+use Test2::V0;
+use Catalyst::Test ();
+Catalyst::Test->import('Hydra');
+require Hydra::Schema;
+require Hydra::Model::DB;
+my $db = $ctx->db();
+my $user = $db->resultset('Users')->create({ username => 'alice', emailaddress => 'alice@invalid.org', password => '!' });
+$user->setPassword('foobar');
+my $builds = $ctx->makeAndEvaluateJobset(
+    expression => "basic.nix",
+    build => 1
+);
+my $login = request(POST '/login', Referer => 'http://localhost', Content => {
+    username => 'alice',
+    password => 'foobar',
+});
+is($login->code, 302);
+my $cookie = $login->header("set-cookie");
+my $my_jobs = request(GET '/dashboard/alice/my-jobs-tab', Accept => 'application/json', Cookie => $cookie);
+ok($my_jobs->is_success);
+my $content = $my_jobs->content();
+like($content, qr/empty_dir/);
+ok(!($content =~ /fails/));
+ok(!($content =~ /succeed_with_failed/));
+done_testing;
@@ -57,8 +57,8 @@ subtest "Validate a run log was created" => sub {
     ok($runlog->did_succeed(), "The process did succeed.");
     is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*");
     is($runlog->command, 'cp "$HYDRA_JSON" "$HYDRA_DATA/joboutput.json"', "The executed command is saved.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is also recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is also recent.");
     is($runlog->exit_code, 0, "This command should have succeeded.");

     subtest "Validate the run log file exists" => sub {

@@ -43,8 +43,8 @@ subtest "Validate a run log was created" => sub {
     ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error.");
     is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*");
     is($runlog->command, 'invalid-command-this-does-not-exist', "The executed command is saved.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is also recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is also recent.");
     is($runlog->exit_code, undef, "This command should not have executed.");
     is($runlog->error_number, 2, "This command failed to exec.");
 };

@@ -55,7 +55,7 @@ subtest "Starting a process" => sub {
     ok($runlog->is_running(), "The process is running.");
     ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
     is($runlog->end_time, undef, "The end time is undefined.");
     is($runlog->exit_code, undef, "The exit code is undefined.");
     is($runlog->signal, undef, "The signal is undefined.");
@@ -70,8 +70,8 @@ subtest "The process completed (success)" => sub {
     ok(!$runlog->is_running(), "The process is not running.");
     ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, undef, "The error number is undefined");
     is($runlog->exit_code, 0, "The exit code is 0.");
     is($runlog->signal, undef, "The signal is undefined.");
@@ -86,8 +86,8 @@ subtest "The process completed (errored)" => sub {
     ok(!$runlog->is_running(), "The process is not running.");
     ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, undef, "The error number is undefined");
     is($runlog->exit_code, 85, "The exit code is 85.");
     is($runlog->signal, undef, "The signal is undefined.");
@@ -102,8 +102,8 @@ subtest "The process completed (status 15, child error 0)" => sub {
     ok(!$runlog->is_running(), "The process is not running.");
     ok($runlog->did_fail_with_signal(), "The process was killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, undef, "The error number is undefined");
     is($runlog->exit_code, undef, "The exit code is undefined.");
     is($runlog->signal, 15, "Signal 15 was sent.");
@@ -118,8 +118,8 @@ subtest "The process completed (signaled)" => sub {
     ok(!$runlog->is_running(), "The process is not running.");
     ok($runlog->did_fail_with_signal(), "The process was killed by a signal.");
     ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, undef, "The error number is undefined");
     is($runlog->exit_code, undef, "The exit code is undefined.");
     is($runlog->signal, 9, "The signal is 9.");
@@ -134,8 +134,8 @@ subtest "The process failed to start" => sub {
     ok(!$runlog->is_running(), "The process is running.");
     ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
     ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error.");
-    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
-    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
+    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
+    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
     is($runlog->error_number, 2, "The error number is saved");
     is($runlog->exit_code, undef, "The exit code is undefined.");
     is($runlog->signal, undef, "The signal is undefined.");

@@ -25,11 +25,11 @@ subtest "requeue" => sub {

     $task->requeue();
     is($task->attempts, 2, "We should have stored a second retry");
-    is($task->retry_at, within(time() + 4, 2), "Delayed two exponential backoff step");
+    is($task->retry_at, within(time() + 4, 5), "Delayed two exponential backoff step");

     $task->requeue();
     is($task->attempts, 3, "We should have stored a third retry");
-    is($task->retry_at, within(time() + 8, 2), "Delayed a third exponential backoff step");
+    is($task->retry_at, within(time() + 8, 5), "Delayed a third exponential backoff step");
 };

 done_testing;
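The timing assertions in this file and the run-log tests above all widen the within() tolerance from 2 to 5 seconds, presumably to stop slow CI machines from flaking. within($target, $delta) from Test2::V0 accepts any value in [$target - $delta, $target + $delta]:

    use Test2::V0;

    # Passes as long as the checked value is within 5 seconds of time() - 1.
    is(time(), within(time() - 1, 5), "timestamp is recent");

    done_testing;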
@@ -101,7 +101,7 @@ subtest "save_task" => sub {
     is($retry->pluginname, "FooPluginName", "Plugin name should match");
     is($retry->payload, "1", "Payload should match");
     is($retry->attempts, 1, "We've had one attempt");
-    is($retry->retry_at, within(time() + 1, 2), "The retry at should be approximately one second away");
+    is($retry->retry_at, within(time() + 1, 5), "The retry at should be approximately one second away");
 };

 done_testing;

@@ -115,7 +115,7 @@ subtest "evaluation" => sub {
     my $build = decode_json(request_json({ uri => "/build/" . $evals->[0]->{builds}->[0] })->content());
     is($build->{job}, "job", "The build's job name is job");
     is($build->{finished}, 0, "The build isn't finished yet");
-    ok($build->{buildoutputs}->{out}->{path} =~ /\/nix\/store\/[a-zA-Z0-9]{32}-job$/, "The build's outpath is in the Nix store and named 'job'");
+    like($build->{buildoutputs}->{out}->{path}, qr/\/nix\/store\/[a-zA-Z0-9]{32}-job$/, "The build's outpath is in the Nix store and named 'job'");

 subtest "search" => sub {
     my $search_project = decode_json(request_json({ uri => "/search/?query=sample" })->content());
t/content-addressed/basic.t (new file, 63 lines)
@@ -0,0 +1,63 @@
+use feature 'unicode_strings';
+use strict;
+use warnings;
+use Setup;
+
+my %ctx = test_init(
+    nix_config => qq|
+    experimental-features = ca-derivations
+    |,
+);
+
+require Hydra::Schema;
+require Hydra::Model::DB;
+
+use JSON::MaybeXS;
+
+use HTTP::Request::Common;
+use Test2::V0;
+require Catalyst::Test;
+Catalyst::Test->import('Hydra');
+
+my $db = Hydra::Model::DB->new;
+hydra_setup($db);
+
+my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"});
+
+my $jobset = createBaseJobset("content-addressed", "content-addressed.nix", $ctx{jobsdir});
+
+ok(evalSucceeds($jobset), "Evaluating jobs/content-addressed.nix should exit with return code 0");
+is(nrQueuedBuildsForJobset($jobset), 6, "Evaluating jobs/content-addressed.nix should result in 6 builds");
+
+for my $build (queuedBuildsForJobset($jobset)) {
+    ok(runBuild($build), "Build '".$build->job."' from jobs/content-addressed.nix should exit with code 0");
+    my $newbuild = $db->resultset('Builds')->find($build->id);
+    is($newbuild->finished, 1, "Build '".$build->job."' from jobs/content-addressed.nix should be finished.");
+    my $expected = $build->job eq "fails" ? 1 : $build->job =~ /with_failed/ ? 6 : $build->job =~ /FailingCA/ ? 2 : 0;
+    is($newbuild->buildstatus, $expected, "Build '".$build->job."' from jobs/content-addressed.nix should have buildstatus $expected.");
+
+    my $response = request("/build/".$build->id);
+    ok($response->is_success, "The 'build' page for build '".$build->job."' should load properly");
+
+    if ($newbuild->buildstatus == 0) {
+        my $buildOutputs = $newbuild->buildoutputs;
+        for my $output ($newbuild->buildoutputs) {
+            # XXX: This hardcodes /nix/store/.
+            # It's fine because in practice the nix store for the tests will be of
+            # the form `/some/thing/nix/store/`, but it would be cleaner if there
+            # was a way to query Nix for its store dir?
+            like(
+                $output->path, qr|/nix/store/|,
+                "Output '".$output->name."' of build '".$build->job."' should be a valid store path"
+            );
+        }
+    }
+
+}
+
+# XXX: deststoredir is undefined: Use of uninitialized value $ctx{"deststoredir"} in concatenation (.) or string at t/content-addressed/basic.t line 58.
+# XXX: This test seems to not do what it seems to be doing. See documentation: https://metacpan.org/pod/Test2::V0#isnt($got,-$do_not_want,-$name)
+isnt(<$ctx{deststoredir}/realisations/*>, "", "The destination store should have the realisations of the built derivations registered");
+
+done_testing;
t/content-addressed/without-experimental-feature.t (new file, 28 lines)
@@ -0,0 +1,28 @@
+use feature 'unicode_strings';
+use strict;
+use warnings;
+use Setup;
+
+my %ctx = test_init();
+
+require Hydra::Schema;
+require Hydra::Model::DB;
+
+use JSON::MaybeXS;
+
+use HTTP::Request::Common;
+use Test2::V0;
+require Catalyst::Test;
+Catalyst::Test->import('Hydra');
+
+my $db = Hydra::Model::DB->new;
+hydra_setup($db);
+
+my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"});
+
+my $jobset = createBaseJobset("content-addressed", "content-addressed.nix", $ctx{jobsdir});
+
+ok(evalSucceeds($jobset), "Evaluating jobs/content-addressed.nix without the experimental feature should exit with return code 0");
+is(nrQueuedBuildsForJobset($jobset), 0, "Evaluating jobs/content-addressed.nix without the experimental Nix feature should result in 0 build");
+
+done_testing;
@@ -4,6 +4,8 @@ with import ./config.nix;
   mkDerivation {
     name = "empty-dir";
    builder = ./empty-dir-builder.sh;
+    meta.maintainers = [ "alice@invalid.org" ];
+    meta.outPath = "${placeholder "out"}";
   };

   fails =
@@ -6,4 +6,9 @@ rec {
     system = builtins.currentSystem;
     PATH = path;
   } // args);
+  mkContentAddressedDerivation = args: mkDerivation ({
+    __contentAddressed = true;
+    outputHashMode = "recursive";
+    outputHashAlgo = "sha256";
+  } // args);
 }
t/jobs/content-addressed.nix (new file, 42 lines)
@@ -0,0 +1,42 @@
+let cfg = import ./config.nix; in
+rec {
+  empty_dir =
+    cfg.mkContentAddressedDerivation {
+      name = "empty-dir";
+      builder = ./empty-dir-builder.sh;
+    };
+
+  fails =
+    cfg.mkContentAddressedDerivation {
+      name = "fails";
+      builder = ./fail.sh;
+    };
+
+  succeed_with_failed =
+    cfg.mkContentAddressedDerivation {
+      name = "succeed-with-failed";
+      builder = ./succeed-with-failed.sh;
+    };
+
+  caDependingOnCA =
+    cfg.mkContentAddressedDerivation {
+      name = "ca-depending-on-ca";
+      builder = ./dir-with-file-builder.sh;
+      FOO = empty_dir;
+    };
+
+  caDependingOnFailingCA =
+    cfg.mkContentAddressedDerivation {
+      name = "ca-depending-on-failing-ca";
+      builder = ./dir-with-file-builder.sh;
+      FOO = fails;
+    };
+
+  nonCaDependingOnCA =
+    cfg.mkDerivation {
+      name = "non-ca-depending-on-ca";
+      builder = ./dir-with-file-builder.sh;
+      FOO = empty_dir;
+    };
+}
t/jobs/dir-with-file-builder.sh (new executable file, 4 lines)
@@ -0,0 +1,4 @@
+#! /bin/sh
+
+mkdir $out
+echo foo > $out/a-file
@@ -1,6 +1,3 @@
 #! /bin/sh

-# Workaround for https://github.com/NixOS/nix/pull/6051
-echo "some output"
-
 mkdir $out

@@ -39,7 +39,11 @@ use Hydra::Helper::Exec;
 sub new {
     my ($class, %opts) = @_;

-    my $dir = File::Temp->newdir();
+    my $deststoredir;
+
+    # Cleanup will be managed by yath. By the default it will be cleaned
+    # up, but can be kept to aid in debugging test failures.
+    my $dir = File::Temp->newdir(CLEANUP => 0);

     $ENV{'HYDRA_DATA'} = "$dir/hydra-data";
     mkdir $ENV{'HYDRA_DATA'};
@@ -53,6 +57,7 @@ sub new {
     my $hydra_config = $opts{'hydra_config'} || "";
     $hydra_config = "queue_runner_metrics_address = 127.0.0.1:0\n" . $hydra_config;
     if ($opts{'use_external_destination_store'} // 1) {
+        $deststoredir = "$dir/nix/dest-store";
         $hydra_config = "store_uri = file://$dir/nix/dest-store\n" . $hydra_config;
     }

@@ -79,7 +84,8 @@ sub new {
         nix_state_dir => $nix_state_dir,
         nix_log_dir => $nix_log_dir,
         testdir => abs_path(dirname(__FILE__) . "/.."),
-        jobsdir => abs_path(dirname(__FILE__) . "/../jobs")
+        jobsdir => abs_path(dirname(__FILE__) . "/../jobs"),
+        deststoredir => $deststoredir,
     }, $class;

     if ($opts{'before_init'}) {

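With the new deststoredir field, content-addressed tests can inspect the external destination store (the isnt(<...>) glob in t/content-addressed/basic.t above relies on it). A sketch of reading the field from a test (assumes Hydra's t/lib helpers are on @INC):

    use Setup;   # Hydra's test helper; provides test_context()

    my $ctx = test_context();
    # HydraTestContext is a blessed hashref, so the field is read directly.
    print "dest store: ", ($ctx->{deststoredir} // "(unset)"), "\n";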
@@ -8,7 +8,7 @@ my $binarycachedir = File::Temp->newdir();

 my $ctx = test_context(
     nix_config => qq|
-        experimental-features = nix-command
+        experimental-features = nix-command ca-derivations
         substituters = file://${binarycachedir}?trusted=1
     |,
     hydra_config => q|

@@ -3,7 +3,6 @@ use warnings;
 use File::Basename;
 use Hydra::Model::DB;
 use Hydra::Helper::Nix;
-use Nix::Store;
 use Cwd;

 my $db = Hydra::Model::DB->new;