Compare commits

1 commit

Author | SHA1 | Message | Date
Eelco Dolstra | 0118770092 | Use the lazy-trees branch of Nix | 2022-11-24 12:04:55 +01:00

200 changed files with 3228 additions and 5301 deletions

.envrc (1 changed line)

@@ -1 +0,0 @@
use flake

.github/workflows/test.yml

@@ -1,27 +1,14 @@
name: "Test"
on:
pull_request:
merge_group:
push:
branches:
- master
jobs:
tests:
strategy:
matrix:
include:
- system: x86_64-linux
runner: ubuntu-latest
- system: aarch64-linux
runner: ubuntu-24.04-arm
runs-on: ${{ matrix.runner }}
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: cachix/install-nix-action@v31
with:
extra_nix_config: |
extra-systems = ${{ matrix.system }}
- uses: DeterminateSystems/magic-nix-cache-action@main
- run: nix-build -A checks.${{ matrix.system }}.build -A checks.${{ matrix.system }}.validate-openapi
- uses: cachix/install-nix-action@v17
#- run: nix flake check
- run: nix-build -A checks.x86_64-linux.build -A checks.x86_64-linux.validate-openapi

.github/workflows/update-flakes.yml

@@ -1,28 +0,0 @@
name: "Update Flakes"
on:
schedule:
# Run weekly on Monday at 00:00 UTC
- cron: '0 0 * * 1'
workflow_dispatch:
jobs:
update-flakes:
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
steps:
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v31
- name: Update flake inputs
run: nix flake update
- name: Create Pull Request
uses: peter-evans/create-pull-request@v5
with:
commit-message: "flake.lock: Update"
title: "Update flake inputs"
body: |
Automated flake input updates.
This PR was automatically created by the update-flakes workflow.
branch: update-flakes
delete-branch: true

.gitignore (vendored, 48 changed lines)

@@ -1,13 +1,47 @@
/.pls_cache
*.o
*~
/.direnv/
.test_info.*
/src/root/static/bootstrap
/src/root/static/fontawesome
/src/root/static/js/flot
Makefile
Makefile.in
.deps
.hydra-data
/config.guess
/config.log
/config.status
/config.sub
/configure
/depcomp
/libtool
/ltmain.sh
/autom4te.cache
/aclocal.m4
/missing
/install-sh
/src/sql/hydra-postgresql.sql
/src/sql/hydra-sqlite.sql
/src/sql/tmp.sqlite
.hydra-data
/src/hydra-eval-jobs/hydra-eval-jobs
/src/root/static/bootstrap
/src/root/static/js/flot
/tests
/doc/manual/images
/doc/manual/manual.html
/doc/manual/manual.pdf
/t/.bzr*
/t/.git*
/t/.hg*
/t/nix
/t/data
/t/jobs/config.nix
t/jobs/declarative/project.json
/inst
hydra-config.h
hydra-config.h.in
result
result-*
outputs
config
stamp-h1
src/hydra-evaluator/hydra-evaluator
src/hydra-queue-runner/hydra-queue-runner
src/root/static/fontawesome/
src/root/static/bootstrap*/

.perlcriticrc

@@ -2,9 +2,3 @@ theme = community
# 5 is the least complainy, 1 is the most complainy
severity = 1
# Disallow backticks - use IPC::Run3 instead for better security
include = InputOutput::ProhibitBacktickOperators
# Prohibit shell-invoking system() and exec() - use list form or IPC::Run3 instead
include = Hydra::ProhibitShellInvokingSystemCalls

.yath.rc (new file, 2 changed lines)

@@ -0,0 +1,2 @@
[test]
-I=rel(t/lib)

Makefile.am (new file, 8 changed lines)

@@ -0,0 +1,8 @@
SUBDIRS = src t doc
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)
EXTRA_DIST = hydra-module.nix
install-data-local: hydra-module.nix
$(INSTALL) -d $(DESTDIR)$(datadir)/nix
$(INSTALL_DATA) hydra-module.nix $(DESTDIR)$(datadir)/nix/

README.md

@@ -23,7 +23,7 @@ Running Hydra is currently only supported on NixOS. The [hydra module](https://g
}
```
### Creating An Admin User
Once the Hydra service has been configured as above and activated, you should already be able to access the UI interface at the specified URL. However some actions require an admin user which has to be created first:
Once the Hydra service has been configured as above and activate you should already be able to access the UI interface at the specified URL. However some actions require an admin user which has to be created first:
```
$ su - hydra
```
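A non-interactive sketch of the same step, modeled on the `hydra-create-user` invocations that appear in the test scripts later in this diff (user name, full name, address, and password are placeholders):
```console
# Run as the hydra user; every value below is an example placeholder.
$ hydra-create-user alice \
    --full-name 'Alice Q. User' \
    --email-address 'alice@example.org' \
    --password foobar \
    --role admin
```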
@@ -39,16 +39,16 @@ In order to evaluate and build anything you need to create _projects_ that conta
#### Creating A Project
Log in as administrator, click "_Admin_" and select "_Create project_". Fill the form as follows:
- **Identifier**: `hello-project`
- **Identifier**: `hello`
- **Display name**: `hello`
- **Description**: `hello project`
Click "_Create project_".
#### Creating A Jobset
After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Change **Type** to Legacy for the example below. Fill the form with the following values:
After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Fill the form with the following values:
- **Identifier**: `hello-project`
- **Identifier**: `hello`
- **Nix expression**: `examples/hello.nix` in `hydra`
- **Check interval**: 60
- **Scheduling shares**: 1
@@ -57,7 +57,7 @@ We have to add two inputs for this jobset. One for _nixpkgs_ and one for _hydra_
- **Input name**: `nixpkgs`
- **Type**: `Git checkout`
- **Value**: `https://github.com/NixOS/nixpkgs nixos-24.05`
- **Value**: `https://github.com/nixos/nixpkgs-channels nixos-20.03`
- **Input name**: `hydra`
- **Type**: `Git checkout`
@@ -72,32 +72,28 @@ Make sure **State** at the top of the page is set to "_Enabled_" and click on "_
You can build Hydra via `nix-build` using the provided [default.nix](./default.nix):
```
$ nix build
$ nix-build
```
### Development Environment
You can use the provided shell.nix to get a working development environment:
```
$ nix develop
$ ln -svf ../../../build/src/bootstrap src/root/static/bootstrap
$ ln -svf ../../../build/src/fontawesome src/root/static/fontawesome
$ ln -svf ../../../../build/src/flot src/root/static/js/flot
$ meson setup build
$ ninja -C build
$ nix-shell
$ ./bootstrap
$ configurePhase # NOTE: not ./configure
$ make
```
The development environment can also automatically be established using [nix-direnv](https://github.com/nix-community/nix-direnv).
### Executing Hydra During Development
When working on new features or bug fixes you need to be able to run Hydra from your working copy. This
can be done using [foreman](https://github.com/ddollar/foreman):
```
$ nix develop
$ nix-shell
$ # hack hack
$ ninja -C build
$ make
$ foreman start
```
@@ -105,7 +101,7 @@ Have a look at the [Procfile](./Procfile) if you want to see how the processes a
conflicts with services that might be running on your host, hydra and postgres are started on custom ports:
- hydra-server: 63333 with the username "alice" and the password "foobar"
- postgresql: 64444, can be connected to using `psql -p 64444 -h localhost hydra`
- postgresql: 64444
Note that this is only ever meant as an ad-hoc way of executing Hydra during development. Please make use of the
NixOS module for actually running Hydra in production.
@@ -119,24 +115,22 @@ Start by following the steps in [Development Environment](#development-environme
Then, you can run the tests and the perlcritic linter together with:
```console
$ nix develop
$ ninja -C build test
$ nix-shell
$ make check
```
You can run a single test with:
```
$ nix develop
$ cd build
$ meson test --test-args=../t/Hydra/Event.t testsuite
$ nix-shell
$ yath test ./t/foo/bar.t
```
And you can run just perlcritic with:
```
$ nix develop
$ cd build
$ meson test perlcritic
$ nix-shell
$ make perlcritic
```
### JSON API
@@ -146,7 +140,7 @@ You can also interface with Hydra through a JSON API. The API is defined in [hyd
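For illustration, a minimal request against the API could look like this (hostname and project name are placeholders); the `Accept: application/json` header mirrors the test scripts elsewhere in this diff:
```console
# Fetch a project as JSON
$ curl -H 'Accept: application/json' https://hydra.example.com/project/hello
```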
## Additional Resources
- [Hydra User's Guide](https://nixos.org/hydra/manual/)
- [Hydra on the NixOS Wiki](https://wiki.nixos.org/wiki/Hydra)
- [Hydra on the NixOS Wiki](https://nixos.wiki/wiki/Hydra)
- [hydra-cli](https://github.com/nlewo/hydra-cli)
- [Peter Simons - Hydra: Setting up your own build farm (NixOS)](https://www.youtube.com/watch?v=RXV0Y5Bn-QQ)

bootstrap (new executable file, 2 changed lines)

@@ -0,0 +1,2 @@
#! /bin/sh -e
exec autoreconf -vfi

configure.ac (new file, 85 changed lines)

@@ -0,0 +1,85 @@
AC_INIT([Hydra], [m4_esyscmd([echo -n $(cat ./version.txt)$VERSION_SUFFIX])])
AC_CONFIG_AUX_DIR(config)
AM_INIT_AUTOMAKE([foreign serial-tests])
AC_LANG([C++])
AC_PROG_CC
AC_PROG_INSTALL
AC_PROG_LN_S
AC_PROG_LIBTOOL
AC_PROG_CXX
CXXFLAGS+=" -std=c++17"
AC_PATH_PROG([XSLTPROC], [xsltproc])
AC_ARG_WITH([docbook-xsl],
[AS_HELP_STRING([--with-docbook-xsl=PATH],
[path of the DocBook XSL stylesheets])],
[docbookxsl="$withval"],
[docbookxsl="/docbook-xsl-missing"])
AC_SUBST([docbookxsl])
AC_DEFUN([NEED_PROG],
[
AC_PATH_PROG($1, $2)
if test -z "$$1"; then
AC_MSG_ERROR([$2 is required])
fi
])
NEED_PROG(perl, perl)
NEED_PROG([NIX_STORE_PROGRAM], [nix-store])
AC_MSG_CHECKING([whether $NIX_STORE_PROGRAM is recent enough])
if test -n "$NIX_STORE" -a -n "$TMPDIR"
then
# This may be executed from within a build chroot, so pacify
# `nix-store' instead of letting it choke while trying to mkdir
# /nix/var.
NIX_STATE_DIR="$TMPDIR"
export NIX_STATE_DIR
fi
if NIX_REMOTE=daemon PAGER=cat "$NIX_STORE_PROGRAM" --timeout 123 -q; then
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no])
AC_MSG_ERROR([`$NIX_STORE_PROGRAM' doesn't support `--timeout'; please use a newer version.])
fi
PKG_CHECK_MODULES([NIX], [nix-main nix-expr nix-store])
testPath="$(dirname $(type -p expr))"
AC_SUBST(testPath)
jobsPath="$(realpath ./t/jobs)"
AC_SUBST(jobsPath)
CXXFLAGS+=" -include nix/config.h"
AC_CONFIG_FILES([
Makefile
doc/Makefile
doc/manual/Makefile
src/Makefile
src/hydra-evaluator/Makefile
src/hydra-eval-jobs/Makefile
src/hydra-queue-runner/Makefile
src/sql/Makefile
src/ttf/Makefile
src/lib/Makefile
src/root/Makefile
src/script/Makefile
t/Makefile
t/jobs/config.nix
t/jobs/declarative/project.json
])
AC_CONFIG_COMMANDS([executable-scripts], [])
AC_CONFIG_HEADER([hydra-config.h])
AC_OUTPUT

default.nix

@@ -1,6 +1,6 @@
# The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and
# returns an attribute set of the shape `{ defaultNix, shellNix }`
(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
(import (fetchTarball https://github.com/edolstra/flake-compat/archive/master.tar.gz) {
src = ./.;
}).defaultNix

doc/Makefile.am (new file, 4 changed lines)

@@ -0,0 +1,4 @@
SUBDIRS = manual
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)

doc/manual/Makefile.am (new file, 6 changed lines)

@@ -0,0 +1,6 @@
MD_FILES = src/*.md
EXTRA_DIST = $(MD_FILES)
install: $(MD_FILES)
mdbook build . -d $(docdir)

doc/manual/meson.build

@@ -1,36 +0,0 @@
srcs = files(
'src/SUMMARY.md',
'src/about.md',
'src/api.md',
'src/configuration.md',
'src/hacking.md',
'src/installation.md',
'src/introduction.md',
'src/jobs.md',
'src/monitoring/README.md',
'src/notifications.md',
'src/plugins/README.md',
'src/plugins/RunCommand.md',
'src/plugins/declarative-projects.md',
'src/projects.md',
'src/webhooks.md',
)
manual = custom_target(
'manual',
command: [
mdbook,
'build',
'@SOURCE_ROOT@/doc/manual',
'-d', meson.current_build_dir() / 'html'
],
depend_files: srcs,
output: ['html'],
build_by_default: true,
)
install_subdir(
manual.full_path(),
install_dir: get_option('datadir') / 'doc/hydra',
strip_directory: true,
)

doc/manual/src/SUMMARY.md

@@ -10,7 +10,6 @@
- [RunCommand](./plugins/RunCommand.md)
- [Using the external API](api.md)
- [Webhooks](webhooks.md)
- [Webhook Authentication Migration Guide](webhook-migration-guide.md)
- [Monitoring Hydra](./monitoring/README.md)
## Developer's Guide

doc/manual/src/configuration.md

@@ -51,12 +51,10 @@ base_uri example.com
`base_uri` should be your Hydra server's proxied URL. If you are using the
Hydra NixOS module, then setting the `hydraURL` option should be enough.
You also need to configure your reverse proxy to pass `X-Request-Base`
to hydra, with the same value as `base_uri`.
This also covers the case of serving Hydra with a prefix path,
as in [http://example.com/hydra]().
For example if you are using nginx, then use configuration similar to
If you want to serve Hydra with a prefix path, for example
[http://example.com/hydra]() then you need to configure your reverse
proxy to pass `X-Request-Base` to hydra, with prefix path as value. For
example if you are using nginx, then use configuration similar to
following:
server {
@@ -65,7 +63,8 @@ following:
.. other configuration ..
location /hydra/ {
proxy_pass http://127.0.0.1:3000/;
proxy_pass http://127.0.0.1:3000;
proxy_redirect http://127.0.0.1:3000 https://example.com/hydra;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
@@ -75,33 +74,6 @@ following:
}
}
Note the trailing slash on the `proxy_pass` directive, which causes nginx to
strip off the `/hydra/` part of the URL before passing it to hydra.
Populating a Cache
------------------
A common use for Hydra is to pre-build and cache derivations which
take a long time to build. While it is possible to directly access the
Hydra server's store over SSH, a more scalable option is to upload
built derivations to a remote store like an [S3-compatible object
store](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html#s3-binary-cache-store). Setting
the `store_uri` parameter will cause Hydra to sign and upload
derivations as they are built:
```
store_uri = s3://cache-bucket-name?compression=zstd&parallel-compression=true&write-nar-listing=1&ls-compression=br&log-compression=br&secret-key=/path/to/cache/private/key
```
This example uses [Zstandard](https://github.com/facebook/zstd)
compression on derivations to reduce CPU usage on the server, but
[Brotli](https://brotli.org/) compression for derivation listings and
build logs because it has better browser support.
See [`nix help
stores`](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html)
for a description of the store URI format.
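The `secret-key` referenced in the example above has to exist before Hydra can sign anything; a minimal sketch using `nix-store --generate-binary-cache-key` (the key name and paths are placeholders):
```console
# Generate a signing key pair for the cache; all arguments are examples.
$ nix-store --generate-binary-cache-key cache.example.org-1 \
    /path/to/cache/private/key /path/to/cache/public/key
```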
Statsd Configuration
--------------------
@@ -159,8 +131,8 @@ use LDAP to manage roles and users.
This is configured by defining the `<ldap>` block in the configuration file.
In this block it's possible to configure the authentication plugin in the
`<config>` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`.
The documentation for the available settings can be found
[here](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
The documentation for the available settings can be found [here]
(https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
Note that the bind password (if needed) should be supplied as an included file to
prevent it from leaking to the Nix store.
@@ -207,13 +179,11 @@ Example configuration:
<role_search_options>
deref = always
</role_search_options>
</store>
</config>
<role_mapping>
# Make all users in the hydra_admin group Hydra admins
hydra_admin = admin
# Allow all users in the dev group to eval jobsets, restart jobs and cancel builds
dev = eval-jobset
# Allow all users in the dev group to restart jobs and cancel builds
dev = restart-jobs
dev = cancel-build
</role_mapping>
@@ -266,40 +236,6 @@ default role mapping:
Note that configuring both the LDAP parameters in the hydra.conf and via
the environment variable is a fatal error.
Webhook Authentication
---------------------
Hydra supports authenticating webhook requests from GitHub and Gitea to prevent unauthorized job evaluations.
Webhook secrets should be stored in separate files outside the Nix store for security using Config::General's include mechanism.
In your main `hydra.conf`:
```apache
<webhooks>
Include /var/lib/hydra/secrets/webhook-secrets.conf
</webhooks>
```
Then create `/var/lib/hydra/secrets/webhook-secrets.conf` with your actual secrets:
```apache
<github>
secret = your-github-webhook-secret
</github>
<gitea>
secret = your-gitea-webhook-secret
</gitea>
```
For multiple secrets (useful for rotation or multiple environments), use an array:
```apache
<github>
secret = your-github-webhook-secret-prod
secret = your-github-webhook-secret-staging
</github>
```
**Important**: The secrets file should have restricted permissions (e.g., 0600) to prevent unauthorized access.
See the [Webhooks documentation](webhooks.md) for detailed setup instructions.
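To illustrate, once secrets are configured an unsigned request should be rejected with the 401 status described under webhook troubleshooting (the hostname is a placeholder):
```console
# An unsigned push should now fail with 401 Unauthorized.
$ curl -i -X POST https://hydra.example.com/api/push-github \
    -H 'Content-Type: application/json' -d '{}'
```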
Embedding Extra HTML
--------------------

doc/manual/src/hacking.md

@@ -12,26 +12,24 @@ To enter a shell in which all environment variables (such as `PERL5LIB`)
and dependencies can be found:
```console
$ nix develop
$ nix-shell
```
To build Hydra, you should then do:
```console
$ mesonConfigurePhase
$ ninja
[nix-shell]$ ./bootstrap
[nix-shell]$ configurePhase
[nix-shell]$ make
```
You start a local database, the webserver, and other components with
foreman:
```console
$ ninja -C build
$ foreman start
```
The Hydra interface will be available on port 63333, with an admin user named "alice" with password "foobar"
You can run just the Hydra web server in your source tree as follows:
```console
@@ -41,20 +39,17 @@ $ ./src/script/hydra-server
You can run Hydra's test suite with the following:
```console
$ meson test
# to run as many tests as you have cores:
$ YATH_JOB_COUNT=$NIX_BUILD_CORES meson test
[nix-shell]$ make check
[nix-shell]$ # to run as many tests as you have cores:
[nix-shell]$ make check YATH_JOB_COUNT=$NIX_BUILD_CORES
[nix-shell]$ # or run yath directly:
[nix-shell]$ yath test
[nix-shell]$ # to run as many tests as you have cores:
[nix-shell]$ yath test -j $NIX_BUILD_CORES
```
To run individual tests:
```console
# Run a specific test file
$ PERL5LIB=t/lib:$PERL5LIB perl t/test.pl t/Hydra/Controller/API/checks.t
# Run all tests in a directory
$ PERL5LIB=t/lib:$PERL5LIB perl t/test.pl t/Hydra/Controller/API/
```
When using `yath` instead of `make check`, ensure you have run `make`
in the root of the repository at least once.
**Warning**: Currently, the tests can fail
if run with high parallelism [due to an issue in
@@ -72,7 +67,7 @@ will reload the page every time you save.
To build Hydra and its dependencies:
```console
$ nix build .#packages.x86_64-linux.default
$ nix-build release.nix -A build.x86_64-linux
```
## Development Tasks

doc/manual/src/installation.md

@@ -48,7 +48,7 @@ Getting Nix
If your server runs NixOS you are all set to continue with installation
of Hydra. Otherwise you first need to install Nix. The latest stable
version can be found one [the Nix web
site](https://nixos.org/download/), along with a manual, which
site](http://nixos.org/nix/download.html), along with a manual, which
includes installation instructions.
Installation

doc/manual/src/plugins/README.md

@@ -42,7 +42,7 @@ Sets CircleCI status.
## Compress build logs
Compresses build logs after a build with bzip2 or zstd.
Compresses build logs after a build with bzip2.
### Configuration options
@@ -50,14 +50,6 @@ Compresses build logs after a build with bzip2 or zstd.
Enable log compression
- `compress_build_logs_compression`
Which compression format to use. Valid values are bzip2 (default) and zstd.
- `compress_build_logs_silent`
Whether to compress logs silently.
### Example
```xml
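# Hypothetical example based on the options documented above:
compress_build_logs = 1
compress_build_logs_compression = zstd
```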

(file name not captured)

@@ -404,10 +404,3 @@ analogous:
| `String value` | `gitea_status_repo` | *Name of the `Git checkout` input* |
| `String value` | `gitea_http_url` | *Public URL of `gitea`*, optional |
Content-addressed derivations
-----------------------------
Hydra can to a certain extent use the [`ca-derivations` experimental Nix feature](https://github.com/NixOS/rfcs/pull/62).
To use it, make sure that the Nix version you use is at least as recent as the one used in hydra's flake.
Be warned that this support is still highly experimental, and anything beyond the basic functionality might be broken at that point.

doc/manual/src/webhook-migration-guide.md

@@ -1,168 +0,0 @@
# Webhook Authentication Migration Guide
This guide helps Hydra administrators migrate from unauthenticated webhooks to authenticated webhooks to secure their Hydra instances against unauthorized job evaluations.
## Why Migrate?
Currently, Hydra's webhook endpoints (`/api/push-github` and `/api/push-gitea`) accept any POST request without authentication. This vulnerability allows:
- Anyone to trigger expensive job evaluations
- Potential denial of service through repeated requests
- Manipulation of build timing and scheduling
## Step-by-Step Migration for NixOS
### 1. Create Webhook Configuration
Create a webhook secrets configuration file with the generated secrets:
```bash
# Create the secrets configuration file with inline secret generation
cat > /var/lib/hydra/secrets/webhook-secrets.conf <<EOF
<github>
secret = $(openssl rand -hex 32)
</github>
<gitea>
secret = $(openssl rand -hex 32)
</gitea>
EOF
# Set secure permissions
chmod 0440 /var/lib/hydra/secrets/webhook-secrets.conf
chown hydra:hydra /var/lib/hydra/secrets/webhook-secrets.conf
```
**Important**: Save the generated secrets to configure them in GitHub/Gitea later. You can view them with:
```bash
cat /var/lib/hydra/secrets/webhook-secrets.conf
```
Then update your NixOS configuration to include the webhook configuration:
```nix
{
services.hydra-dev = {
enable = true;
hydraURL = "https://hydra.example.com";
notificationSender = "hydra@example.com";
extraConfig = ''
<webhooks>
Include /var/lib/hydra/secrets/webhook-secrets.conf
</webhooks>
'';
};
}
```
For multiple secrets (useful for rotation or multiple environments), update your webhook-secrets.conf:
```apache
<github>
secret = your-github-webhook-secret-prod
secret = your-github-webhook-secret-staging
</github>
<gitea>
secret = your-gitea-webhook-secret
</gitea>
```
### 2. Deploy Configuration
Apply the NixOS configuration:
```bash
nixos-rebuild switch
```
This will automatically restart Hydra services with the new configuration.
### 3. Verify Configuration
Check Hydra's logs to ensure secrets were loaded successfully:
```bash
journalctl -u hydra-server | grep -i webhook
```
You should not see warnings about webhook authentication not being configured.
### 4. Update Your Webhooks
#### GitHub
1. Navigate to your repository settings: `https://github.com/<owner>/<repo>/settings/hooks`
2. Edit your existing Hydra webhook
3. In the "Secret" field, paste the content of `/var/lib/hydra/secrets/github-webhook-secret`
4. Click "Update webhook"
5. GitHub will send a ping event to verify the configuration
#### Gitea
1. Navigate to your repository webhook settings
2. Edit your existing Hydra webhook
3. In the "Secret" field, paste the content of `/var/lib/hydra/secrets/gitea-webhook-secret`
4. Click "Update Webhook"
5. Use the "Test Delivery" button to verify the configuration
### 5. Test the Configuration
After updating each webhook:
1. Make a test commit to trigger the webhook
2. Check Hydra's logs for successful authentication
3. Verify the evaluation was triggered in Hydra's web interface
## Troubleshooting
### 401 Unauthorized Errors
If webhooks start failing with 401 errors:
- Verify the secret in the Git forge matches the file content exactly
- Check file permissions: `ls -la /var/lib/hydra/secrets/`
- Ensure no extra whitespace in secret files
- Check Hydra logs for specific error messages
### Webhook Still Unauthenticated
If you see warnings about unauthenticated webhooks after configuration:
- Verify the configuration syntax in your NixOS module
- Ensure the NixOS configuration was successfully applied
- Check that the webhook-secrets.conf file exists and is readable by the Hydra user
- Verify the Include path is correct in your hydra.conf
- Check the syntax of your webhook-secrets.conf file
### Testing Without Git Forge
You can test webhook authentication using curl:
```bash
# Read the secret
SECRET=$(cat /var/lib/hydra/secrets/github-webhook-secret)
# Create test payload
PAYLOAD='{"ref":"refs/heads/main","repository":{"clone_url":"https://github.com/test/repo.git"}}'
# Calculate signature
SIGNATURE="sha256=$(echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$SECRET" | cut -d' ' -f2)"
# Send authenticated request
curl -X POST https://your-hydra/api/push-github \
-H "Content-Type: application/json" \
-H "X-Hub-Signature-256: $SIGNATURE" \
-d "$PAYLOAD"
```
For Gitea (no prefix in signature):
```bash
# Read the secret
SECRET=$(cat /var/lib/hydra/secrets/gitea-webhook-secret)
# Create test payload
PAYLOAD='{"ref":"refs/heads/main","repository":{"clone_url":"https://gitea.example.com/test/repo.git"}}'
# Calculate signature
SIGNATURE=$(echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$SECRET" | cut -d' ' -f2)
# Send authenticated request
curl -X POST https://your-hydra/api/push-gitea \
-H "Content-Type: application/json" \
-H "X-Gitea-Signature: $SIGNATURE" \
-d "$PAYLOAD"
```

doc/manual/src/webhooks.md

@@ -1,101 +1,13 @@
# Webhooks
Hydra can be notified by github or gitea with webhooks to trigger a new evaluation when a
Hydra can be notified by github's webhook to trigger a new evaluation when a
jobset has a github repo in its input.
## Webhook Authentication
Hydra supports webhook signature verification for both GitHub and Gitea using HMAC-SHA256. This ensures that webhook
requests are coming from your configured Git forge and haven't been tampered with.
### Configuring Webhook Authentication
1. **Create webhook configuration**: Generate and store webhook secrets securely:
```bash
# Create directory and generate secrets in one step
mkdir -p /var/lib/hydra/secrets
cat > /var/lib/hydra/secrets/webhook-secrets.conf <<EOF
<github>
secret = $(openssl rand -hex 32)
</github>
<gitea>
secret = $(openssl rand -hex 32)
</gitea>
EOF
# Set secure permissions
chmod 0600 /var/lib/hydra/secrets/webhook-secrets.conf
chown hydra:hydra /var/lib/hydra/secrets/webhook-secrets.conf
```
2. **Configure Hydra**: Add the following to your `hydra.conf`:
```apache
<webhooks>
Include /var/lib/hydra/secrets/webhook-secrets.conf
</webhooks>
```
3. **Configure your Git forge**: View the generated secrets and configure them in GitHub/Gitea:
```bash
grep "secret =" /var/lib/hydra/secrets/webhook-secrets.conf
```
### Multiple Secrets Support
Hydra supports configuring multiple secrets for each platform, which is useful for:
- Zero-downtime secret rotation
- Supporting multiple environments (production/staging)
- Gradual migration of webhooks
To configure multiple secrets, use array syntax:
```apache
<github>
secret = current-webhook-secret
secret = previous-webhook-secret
</github>
```
## GitHub
To set up a webhook for a GitHub repository go to `https://github.com/<yourhandle>/<yourrepo>/settings`
and in the `Webhooks` tab click on `Add webhook`.
To set up a github webhook go to `https://github.com/<yourhandle>/<yourrepo>/settings` and in the `Webhooks` tab
click on `Add webhook`.
- In `Payload URL` fill in `https://<your-hydra-domain>/api/push-github`.
- In `Content type` switch to `application/json`.
- In the `Secret` field, enter the content of your GitHub webhook secret file (if authentication is configured).
- The `Secret` field can stay empty.
- For `Which events would you like to trigger this webhook?` keep the default option for events on `Just the push event.`.
Then add the hook with `Add webhook`.
### Verifying GitHub Webhook Security
After configuration, GitHub will send webhook requests with an `X-Hub-Signature-256` header containing the HMAC-SHA256
signature of the request body. Hydra will verify this signature matches the configured secret.
## Gitea
To set up a webhook for a Gitea repository go to the settings of the repository in your Gitea instance
and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop down.
- In `Target URL` fill in `https://<your-hydra-domain>/api/push-gitea`.
- Keep HTTP method `POST`, POST Content Type `application/json` and Trigger On `Push Events`.
- In the `Secret` field, enter the content of your Gitea webhook secret file (if authentication is configured).
- Change the branch filter to match the git branch hydra builds.
Then add the hook with `Add webhook`.
### Verifying Gitea Webhook Security
After configuration, Gitea will send webhook requests with an `X-Gitea-Signature` header containing the HMAC-SHA256
signature of the request body. Hydra will verify this signature matches the configured secret.
## Troubleshooting
If you receive 401 Unauthorized errors:
- Verify the webhook secret in your Git forge matches the content of the secret file exactly
- Check that the secret file has proper permissions (should be 0600)
- Look at Hydra's logs for specific error messages
- Ensure the correct signature header is being sent by your Git forge
If you see warnings about webhook authentication not being configured:
- Configure webhook authentication as described above to secure your endpoints

examples/hello.nix

@@ -1,5 +1,5 @@
#
# jobset example file. This file can be referenced as Nix expression
# jobset example file. This file canbe referenced as Nix expression
# in a jobset configuration along with inputs for nixpkgs and the
# repository containing this file.
#

flake.lock (generated, 71 changed lines)

@@ -1,60 +1,81 @@
{
"nodes": {
"nix": {
"lowdown-src": {
"flake": false,
"locked": {
"lastModified": 1760573252,
"narHash": "sha256-mcvNeNdJP5R7huOc8Neg0qZESx/0DMg8Fq6lsdx0x8U=",
"owner": "NixOS",
"repo": "nix",
"rev": "3c39583e5512729f9c5a44c3b03b6467a2acd963",
"lastModified": 1633514407,
"narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
"owner": "kristapsdz",
"repo": "lowdown",
"rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "2.32-maintenance",
"repo": "nix",
"owner": "kristapsdz",
"repo": "lowdown",
"type": "github"
}
},
"nix-eval-jobs": {
"flake": false,
"nix": {
"inputs": {
"lowdown-src": "lowdown-src",
"nixpkgs": "nixpkgs",
"nixpkgs-regression": "nixpkgs-regression"
},
"locked": {
"lastModified": 1760478325,
"narHash": "sha256-hA+NOH8KDcsuvH7vJqSwk74PyZP3MtvI/l+CggZcnTc=",
"owner": "nix-community",
"repo": "nix-eval-jobs",
"rev": "daa42f9e9c84aeff1e325dd50fda321f53dfd02c",
"lastModified": 1668607642,
"narHash": "sha256-lNnk5thRq43XPcA+5KwoHgdsKf3urmE4B2xzHokVMbc=",
"owner": "edolstra",
"repo": "nix",
"rev": "561440bd6ddebd53d7b42bced22cb78fd607a6de",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "v2.32.1",
"repo": "nix-eval-jobs",
"owner": "edolstra",
"ref": "lazy-trees",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1759652726,
"narHash": "sha256-2VjnimOYDRb3DZHyQ2WH2KCouFqYm9h0Rr007Al/WSA=",
"lastModified": 1657693803,
"narHash": "sha256-G++2CJ9u0E7NNTAi9n5G8TdDmGJXcIjkJ3NF8cetQB8=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "06b2985f0cc9eb4318bf607168f4b15af1e5e81d",
"rev": "365e1b3a859281cf11b94f87231adeabbdd878a2",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-25.05-small",
"ref": "nixos-22.05-small",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"root": {
"inputs": {
"nix": "nix",
"nix-eval-jobs": "nix-eval-jobs",
"nixpkgs": "nixpkgs"
"nixpkgs": [
"nix",
"nixpkgs"
]
}
}
},

flake.nix (648 changed lines)

@@ -1,78 +1,551 @@
{
description = "A Nix-based continuous build system";
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05-small";
inputs.nixpkgs.follows = "nix/nixpkgs";
inputs.nix.url = "github:edolstra/nix/lazy-trees";
inputs.nix = {
url = "github:NixOS/nix/2.32-maintenance";
# We want to control the deps precisely
flake = false;
};
inputs.nix-eval-jobs = {
url = "github:nix-community/nix-eval-jobs/v2.32.1";
# We want to control the deps precisely
flake = false;
};
outputs = { self, nixpkgs, nix, nix-eval-jobs, ... }:
outputs = { self, nixpkgs, nix }:
let
version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (self.lastModifiedDate or "19700101")}.${self.shortRev or "DIRTY"}";
systems = [ "x86_64-linux" "aarch64-linux" ];
forEachSystem = nixpkgs.lib.genAttrs systems;
pkgsBySystem = forEachSystem (system: import nixpkgs {
inherit system;
overlays = [ self.overlays.default nix.overlays.default ];
});
# NixOS configuration used for VM tests.
hydraServer =
{ config, pkgs, ... }:
{
imports = [ self.nixosModules.hydraTest ];
virtualisation.memorySize = 1024;
virtualisation.writableStore = true;
environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
nix = {
# Without this nix tries to fetch packages from the default
# cache.nixos.org which is not reachable from this sandboxed NixOS test.
binaryCaches = [ ];
};
};
in
rec {
# A Nixpkgs overlay that provides a 'hydra' package.
overlays.default = final: prev: {
nixDependenciesForHydra = final.lib.makeScope final.newScope
(import (nix + "/packaging/dependencies.nix") {
pkgs = final;
inherit (final) stdenv;
inputs = {};
});
nixComponentsForHydra = final.lib.makeScope final.nixDependenciesForHydra.newScope
(import (nix + "/packaging/components.nix") {
officialRelease = true;
inherit (final) lib;
pkgs = final;
src = nix;
maintainers = [ ];
});
nix-eval-jobs = final.callPackage nix-eval-jobs {
nixComponents = final.nixComponentsForHydra;
# Add LDAP dependencies that aren't currently found within nixpkgs.
perlPackages = prev.perlPackages // {
PrometheusTiny = final.perlPackages.buildPerlPackage {
pname = "Prometheus-Tiny";
version = "0.007";
src = final.fetchurl {
url = "mirror://cpan/authors/id/R/RO/ROBN/Prometheus-Tiny-0.007.tar.gz";
sha256 = "0ef8b226a2025cdde4df80129dd319aa29e884e653c17dc96f4823d985c028ec";
};
buildInputs = with final.perlPackages; [ HTTPMessage Plack TestException ];
meta = {
homepage = "https://github.com/robn/Prometheus-Tiny";
description = "A tiny Prometheus client";
license = with final.lib.licenses; [ artistic1 gpl1Plus ];
};
};
};
hydra = final.callPackage ./package.nix {
inherit (final.lib) fileset;
rawSrc = self;
nixComponents = final.nixComponentsForHydra;
hydra = with final; let
perlDeps = buildEnv {
name = "hydra-perl-deps";
paths = with perlPackages; lib.closePropagation
[
AuthenSASL
CatalystActionREST
CatalystAuthenticationStoreDBIxClass
CatalystAuthenticationStoreLDAP
CatalystDevel
CatalystPluginAccessLog
CatalystPluginAuthorizationRoles
CatalystPluginCaptcha
CatalystPluginPrometheusTiny
CatalystPluginSessionStateCookie
CatalystPluginSessionStoreFastMmap
CatalystPluginStackTrace
CatalystTraitForRequestProxyBase
CatalystViewDownload
CatalystViewJSON
CatalystViewTT
CatalystXRoleApplicator
CatalystXScriptServerStarman
CryptPassphrase
CryptPassphraseArgon2
CryptRandPasswd
DataDump
DateTime
DBDPg
DBDSQLite
DigestSHA1
EmailMIME
EmailSender
FileLibMagic
FileSlurper
FileWhich
final.nix.perl-bindings
git
IOCompress
IPCRun
IPCRun3
JSON
JSONMaybeXS
JSONXS
ListSomeUtils
LWP
LWPProtocolHttps
ModulePluggable
NetAmazonS3
NetPrometheus
NetStatsd
PadWalker
ParallelForkManager
PerlCriticCommunity
PrometheusTinyShared
ReadonlyX
SetScalar
SQLSplitStatement
Starman
StringCompareConstantTime
SysHostnameLong
TermSizeAny
TermReadKey
Test2Harness
TestPostgreSQL
TextDiff
TextTable
UUID4Tiny
YAML
XMLSimple
];
};
in
stdenv.mkDerivation {
name = "hydra-${version}";
src = self;
buildInputs =
[
makeWrapper
autoconf
automake
libtool
unzip
nukeReferences
pkg-config
libpqxx
top-git
mercurial
darcs
subversion
breezy
openssl
bzip2
libxslt
final.nix
perlDeps
perl
mdbook
pixz
boost
postgresql_13
(if lib.versionAtLeast lib.version "20.03pre"
then nlohmann_json
else nlohmann_json.override { multipleHeaders = true; })
prometheus-cpp
];
checkInputs = [
cacert
foreman
glibcLocales
libressl.nc
openldap
python3
];
hydraPath = lib.makeBinPath (
[
subversion
openssh
final.nix
coreutils
findutils
pixz
gzip
bzip2
xz
gnutar
unzip
git
top-git
mercurial
darcs
gnused
breezy
] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
);
OPENLDAP_ROOT = openldap;
shellHook = ''
pushd $(git rev-parse --show-toplevel) >/dev/null
PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH
PERL5LIB=$(pwd)/src/lib:$PERL5LIB
export HYDRA_HOME="$(pwd)/src/"
mkdir -p .hydra-data
export HYDRA_DATA="$(pwd)/.hydra-data"
export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'
popd >/dev/null
'';
preConfigure = "autoreconf -vfi";
NIX_LDFLAGS = [ "-lpthread" ];
enableParallelBuilding = true;
doCheck = true;
preCheck = ''
patchShebangs .
export LOGNAME=''${LOGNAME:-foo}
# set $HOME for bzr so it can create its trace file
export HOME=$(mktemp -d)
'';
postInstall = ''
mkdir -p $out/nix-support
for i in $out/bin/*; do
read -n 4 chars < $i
if [[ $chars =~ ELF ]]; then continue; fi
wrapProgram $i \
--prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
--prefix PATH ':' $out/bin:$hydraPath \
--set HYDRA_RELEASE ${version} \
--set HYDRA_HOME $out/libexec/hydra \
--set NIX_RELEASE ${final.nix.name or "unknown"}
done
'';
dontStrip = true;
meta.description = "Build of Hydra on ${final.stdenv.system}";
passthru = { inherit perlDeps; inherit (final) nix; };
};
};
hydraJobs = {
build = forEachSystem (system: packages.${system}.hydra);
buildNoTests = forEachSystem (system:
packages.${system}.hydra.overrideAttrs (_: {
doCheck = false;
})
);
manual = forEachSystem (system: let
pkgs = nixpkgs.legacyPackages.${system};
hydra = self.packages.${pkgs.hostPlatform.system}.hydra;
in
pkgs.runCommand "hydra-manual-${hydra.version}" { }
manual = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
pkgs.runCommand "hydra-manual-${version}" { }
''
mkdir -p $out/share
cp -prvd ${hydra.doc}/share/doc $out/share/
cp -prvd ${pkgs.hydra}/share/doc $out/share/
mkdir $out/nix-support
echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
'');
tests = import ./nixos-tests.nix {
inherit forEachSystem nixpkgs nixosModules;
};
tests.install = forEachSystem (system:
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
simpleTest {
nodes.machine = hydraServer;
testScript =
''
machine.wait_for_job("hydra-init")
machine.wait_for_job("hydra-server")
machine.wait_for_job("hydra-evaluator")
machine.wait_for_job("hydra-queue-runner")
machine.wait_for_open_port("3000")
machine.succeed("curl --fail http://localhost:3000/")
'';
});
tests.notifications = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
simpleTest {
nodes.machine = { pkgs, ... }: {
imports = [ hydraServer ];
services.hydra-dev.extraConfig = ''
<influxdb>
url = http://127.0.0.1:8086
db = hydra
</influxdb>
'';
services.influxdb.enable = true;
};
testScript = ''
machine.wait_for_job("hydra-init")
# Create an admin account and some other state.
machine.succeed(
"""
su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
mkdir /run/jobset
chmod 755 /run/jobset
cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
chmod 644 /run/jobset/default.nix
chown -R hydra /run/jobset
"""
)
# Wait until InfluxDB can receive web requests
machine.wait_for_job("influxdb")
machine.wait_for_open_port("8086")
# Create an InfluxDB database where hydra will write to
machine.succeed(
"curl -XPOST 'http://127.0.0.1:8086/query' "
+ "--data-urlencode 'q=CREATE DATABASE hydra'"
)
# Wait until hydra-server can receive HTTP requests
machine.wait_for_job("hydra-server")
machine.wait_for_open_port("3000")
# Setup the project and jobset
machine.succeed(
"su - hydra -c 'perl -I ${pkgs.hydra.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
)
# Wait until hydra has built the job and
# the InfluxDBNotification plugin uploaded its notification to InfluxDB
machine.wait_until_succeeds(
"curl -s -H 'Accept: application/csv' "
+ "-G 'http://127.0.0.1:8086/query?db=hydra' "
+ "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
)
'';
});
tests.gitea = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
makeTest {
nodes.machine = { pkgs, ... }: {
imports = [ hydraServer ];
services.hydra-dev.extraConfig = ''
<gitea_authorization>
root=d7f16a3412e01a43a414535b16007c6931d3a9c7
</gitea_authorization>
'';
nix = {
distributedBuilds = true;
buildMachines = [{
hostName = "localhost";
systems = [ system ];
}];
binaryCaches = [ ];
};
services.gitea = {
enable = true;
database.type = "postgres";
disableRegistration = true;
httpPort = 3001;
};
services.openssh.enable = true;
environment.systemPackages = with pkgs; [ gitea git jq gawk ];
networking.firewall.allowedTCPPorts = [ 3000 ];
};
skipLint = true;
testScript =
let
scripts.mktoken = pkgs.writeText "token.sql" ''
INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7');
'';
scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
set -x
mkdir -p /tmp/repo $HOME/.ssh
cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
chmod 0400 $HOME/.ssh/privk
git -C /tmp/repo init
cp ${smallDrv} /tmp/repo/jobset.nix
git -C /tmp/repo add .
git config --global user.email test@localhost
git config --global user.name test
git -C /tmp/repo commit -m 'Initial import'
git -C /tmp/repo remote add origin gitea@machine:root/repo
GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
git -C /tmp/repo push origin master
git -C /tmp/repo log >&2
'';
scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
set -x
su -l hydra -c "hydra-create-user root --email-address \
'alice@example.org' --password foobar --role admin"
URL=http://localhost:3000
USERNAME="root"
PASSWORD="foobar"
PROJECT_NAME="trivial"
JOBSET_NAME="trivial"
mycurl() {
curl --referer $URL -H "Accept: application/json" \
-H "Content-Type: application/json" $@
}
cat >data.json <<EOF
{ "username": "$USERNAME", "password": "$PASSWORD" }
EOF
mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt
cat >data.json <<EOF
{
"displayname":"Trivial",
"enabled":"1",
"visible":"1"
}
EOF
mycurl --silent -X PUT $URL/project/$PROJECT_NAME \
-d @data.json -b hydra-cookie.txt
cat >data.json <<EOF
{
"description": "Trivial",
"checkinterval": "60",
"enabled": "1",
"visible": "1",
"keepnr": "1",
"enableemail": true,
"emailoverride": "hydra@localhost",
"type": 0,
"nixexprinput": "git",
"nixexprpath": "jobset.nix",
"inputs": {
"git": {"value": "http://localhost:3001/root/repo.git", "type": "git"},
"gitea_repo_name": {"value": "repo", "type": "string"},
"gitea_repo_owner": {"value": "root", "type": "string"},
"gitea_status_repo": {"value": "git", "type": "string"},
"gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
}
}
EOF
mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME \
-d @data.json -b hydra-cookie.txt
'';
api_token = "d7f16a3412e01a43a414535b16007c6931d3a9c7";
snakeoilKeypair = {
privkey = pkgs.writeText "privkey.snakeoil" ''
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
-----END EC PRIVATE KEY-----
'';
pubkey = pkgs.lib.concatStrings [
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
"yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
"9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= sakeoil"
];
};
smallDrv = pkgs.writeText "jobset.nix" ''
{ trivial = builtins.derivation {
name = "trivial";
system = "${system}";
builder = "/bin/sh";
allowSubstitutes = false;
preferLocalBuild = true;
args = ["-c" "echo success > $out; exit 0"];
};
}
'';
in
''
import json
machine.start()
machine.wait_for_unit("multi-user.target")
machine.wait_for_open_port(3000)
machine.wait_for_open_port(3001)
machine.succeed(
"su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
+ "--username root --password root --email test@localhost'"
)
machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")
machine.succeed(
"curl --fail -X POST http://localhost:3001/api/v1/user/repos "
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
+ ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
)
machine.succeed(
"curl --fail -X POST http://localhost:3001/api/v1/user/keys "
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
+ ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
)
machine.succeed(
"${scripts.git-setup}"
)
machine.succeed(
"${scripts.hydra-setup}"
)
machine.wait_until_succeeds(
'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
+ '| jq .buildstatus | xargs test 0 -eq'
)
data = machine.succeed(
'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
)
response = json.loads(data)
assert len(response) == 2, "Expected exactly two status updates for latest commit!"
assert response[0]['status'] == "success", "Expected latest status to be success!"
assert response[1]['status'] == "pending", "Expected first status to be pending!"
machine.shutdown()
'';
});
tests.validate-openapi = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
pkgs.runCommand "validate-openapi"
{ buildInputs = [ pkgs.openapi-generator-cli ]; }
''
openapi-generator-cli validate -i ${./hydra-api.yaml}
touch $out
'');
container = nixosConfigurations.container.config.system.build.toplevel;
};
@@ -83,44 +556,61 @@
validate-openapi = hydraJobs.tests.validate-openapi.${system};
});
packages = forEachSystem (system: let
inherit (nixpkgs) lib;
pkgs = nixpkgs.legacyPackages.${system};
nixDependencies = lib.makeScope pkgs.newScope
(import (nix + "/packaging/dependencies.nix") {
inherit pkgs;
inherit (pkgs) stdenv;
inputs = {};
});
nixComponents = lib.makeScope nixDependencies.newScope
(import (nix + "/packaging/components.nix") {
officialRelease = true;
inherit lib pkgs;
src = nix;
maintainers = [ ];
});
in {
nix-eval-jobs = pkgs.callPackage nix-eval-jobs {
inherit nixComponents;
};
hydra = pkgs.callPackage ./package.nix {
inherit (nixpkgs.lib) fileset;
inherit nixComponents;
inherit (self.packages.${system}) nix-eval-jobs;
rawSrc = self;
};
default = self.packages.${system}.hydra;
packages = forEachSystem (system: {
hydra = pkgsBySystem.${system}.hydra;
default = pkgsBySystem.${system}.hydra;
});
nixosModules = import ./nixos-modules {
inherit self;
nixosModules.hydra = {
imports = [ ./hydra-module.nix ];
nixpkgs.overlays = [ self.overlays.default nix.overlays.default ];
};
nixosModules.hydraTest = { pkgs, ... }: {
imports = [ self.nixosModules.hydra ];
services.hydra-dev.enable = true;
services.hydra-dev.hydraURL = "http://hydra.example.org";
services.hydra-dev.notificationSender = "admin@hydra.example.org";
systemd.services.hydra-send-stats.enable = false;
services.postgresql.enable = true;
services.postgresql.package = pkgs.postgresql_11;
# The following is to work around the following error from hydra-server:
# [error] Caught exception in engine "Cannot determine local time zone"
time.timeZone = "UTC";
nix.extraOptions = ''
allowed-uris = https://github.com/
'';
};
nixosModules.hydraProxy = {
services.httpd = {
enable = true;
adminAddr = "hydra-admin@example.org";
extraConfig = ''
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
ProxyRequests Off
ProxyPreserveHost On
ProxyPass /apache-errors !
ErrorDocument 503 /apache-errors/503.html
ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
ProxyPassReverse / http://127.0.0.1:3000/
'';
};
};
nixosConfigurations.container = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules =
[
self.nixosModules.hydra
self.nixosModules.hydraTest
self.nixosModules.hydraProxy
{

(file name not captured)

@@ -1,7 +1,5 @@
#!/bin/sh
export PATH=$(pwd)/src/script:$PATH
# wait for hydra-server to listen
while ! nc -z localhost 63333; do sleep 1; done

(file name not captured)

@@ -1,7 +1,5 @@
#!/bin/sh
export PATH=$(pwd)/src/script:$PATH
# wait for postgresql to listen
while ! pg_isready -h $(pwd)/.hydra-data/postgres -p 64444; do sleep 1; done

(file name not captured)

@@ -1,7 +1,5 @@
#!/bin/sh
export PATH=$(pwd)/src/script:$PATH
# wait for hydra-server to listen
while ! nc -z localhost 63333; do sleep 1; done

hydra-api.yaml

@@ -70,7 +70,7 @@ paths:
$ref: '#/components/examples/projects-success'
/api/push:
post:
put:
summary: trigger jobsets
parameters:
- in: query
@@ -78,11 +78,6 @@ paths:
description: project and jobset formatted as "<project>:<jobset>" to evaluate
schema:
type: string
- in: query
name: force
description: when set to true the jobset gets evaluated even when it did not change
schema:
type: boolean
responses:
'200':
description: jobset trigger response
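For illustration, a request against this endpoint might look as follows (hostname, project, and jobset are placeholders; note that POST vs. PUT and the `force` parameter differ between the two sides of this hunk):
```console
# Trigger an evaluation of jobset "hello" in project "hello".
$ curl -X POST -H 'Accept: application/json' \
    'https://hydra.example.com/api/push?jobsets=hello:hello&force=true'
```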
@@ -538,13 +533,13 @@ paths:
schema:
$ref: '#/components/schemas/Error'
/eval/{eval-id}:
/eval/{build-id}:
get:
summary: Retrieves evaluations identified by eval id
summary: Retrieves evaluations identified by build id
parameters:
- name: eval-id
- name: build-id
in: path
description: eval identifier
description: build identifier
required: true
schema:
type: integer
@@ -556,24 +551,6 @@ paths:
schema:
$ref: '#/components/schemas/JobsetEval'
/eval/{eval-id}/builds:
get:
summary: Retrieves all builds belonging to an evaluation identified by eval id
parameters:
- name: eval-id
in: path
description: eval identifier
required: true
schema:
type: integer
responses:
'200':
description: builds
content:
application/json:
schema:
$ref: '#/components/schemas/JobsetEvalBuilds'
components:
schemas:
@@ -819,13 +796,6 @@ components:
additionalProperties:
$ref: '#/components/schemas/JobsetEvalInput'
JobsetEvalBuilds:
type: array
items:
type: object
additionalProperties:
$ref: '#/components/schemas/Build'
JobsetOverview:
type: array
items:
@@ -900,7 +870,7 @@ components:
description: Size of the produced file
type: integer
defaultpath:
description: if path is a directory, the default file relative to path to be served
description: This is a Git/Mercurial commit hash or a Subversion revision number
type: string
'type':
description: Types of build product (user defined)

hydra-module.nix

@@ -68,6 +68,8 @@ in
package = mkOption {
type = types.path;
default = pkgs.hydra;
defaultText = literalExpression "pkgs.hydra";
description = "The Hydra package.";
};
@@ -228,10 +230,10 @@ in
nix.settings = {
trusted-users = [ "hydra-queue-runner" ];
keep-outputs = true;
keep-derivations = true;
gc-keep-outputs = true;
gc-keep-derivations = true;
};
services.hydra-dev.extraConfig =
''
using_frontend_proxy = 1
@@ -338,9 +340,8 @@ in
systemd.services.hydra-queue-runner =
{ wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
wants = [ "network-online.target" ];
after = [ "hydra-init.service" "network.target" "network-online.target" ];
path = [ cfg.package pkgs.hostname-debian pkgs.openssh pkgs.bzip2 config.nix.package ];
after = [ "hydra-init.service" "network.target" ];
path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
restartTriggers = [ hydraConf ];
environment = env // {
PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
@@ -364,7 +365,7 @@ in
requires = [ "hydra-init.service" ];
restartTriggers = [ hydraConf ];
after = [ "hydra-init.service" "network.target" ];
path = with pkgs; [ hostname-debian cfg.package ];
path = with pkgs; [ nettools cfg.package jq ];
environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-evaluator";
};
@@ -407,7 +408,6 @@ in
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" ];
restartTriggers = [ hydraConf ];
path = [ pkgs.zstd ];
environment = env // {
PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify";
@@ -458,17 +458,10 @@ in
# logs automatically after a step finishes, but this doesn't work
# if the queue runner is stopped prematurely.
systemd.services.hydra-compress-logs =
{ path = [ pkgs.bzip2 pkgs.zstd ];
{ path = [ pkgs.bzip2 ];
script =
''
set -eou pipefail
compression=$(sed -nr 's/compress_build_logs_compression = (.*)/\1/p' ${baseDir}/hydra.conf)
if [[ $compression == "" || $compression == bzip2 ]]; then
compressionCmd=(bzip2)
elif [[ $compression == zstd ]]; then
compressionCmd=(zstd --rm)
fi
find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c -print0 | xargs -0 -r "''${compressionCmd[@]}" --force --quiet
find ${baseDir}/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r bzip2 -v -f
'';
startAt = "Sun 01:45";
};

meson.build

@@ -1,26 +0,0 @@
project('hydra', 'cpp',
version: files('version.txt'),
license: 'GPL-3.0',
default_options: [
'debug=true',
'optimization=2',
'cpp_std=c++23',
],
)
nix_util_dep = dependency('nix-util', required: true)
nix_store_dep = dependency('nix-store', required: true)
nix_main_dep = dependency('nix-main', required: true)
pqxx_dep = dependency('libpqxx', required: true)
prom_cpp_core_dep = dependency('prometheus-cpp-core', required: true)
prom_cpp_pull_dep = dependency('prometheus-cpp-pull', required: true)
mdbook = find_program('mdbook', native: true)
perl = find_program('perl', native: true)
subdir('doc/manual')
subdir('nixos-modules')
subdir('src')
subdir('t')

nixos-modules/default.nix

@@ -1,47 +0,0 @@
{ self }:
{
hydra = { pkgs, lib,... }: {
_file = ./default.nix;
imports = [ ./hydra.nix ];
services.hydra-dev.package = lib.mkDefault self.packages.${pkgs.hostPlatform.system}.hydra;
};
hydraTest = { pkgs, ... }: {
services.hydra-dev.enable = true;
services.hydra-dev.hydraURL = "http://hydra.example.org";
services.hydra-dev.notificationSender = "admin@hydra.example.org";
systemd.services.hydra-send-stats.enable = false;
services.postgresql.enable = true;
# The following is to work around the following error from hydra-server:
# [error] Caught exception in engine "Cannot determine local time zone"
time.timeZone = "UTC";
nix.extraOptions = ''
allowed-uris = https://github.com/
'';
};
hydraProxy = {
services.httpd = {
enable = true;
adminAddr = "hydra-admin@example.org";
extraConfig = ''
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
ProxyRequests Off
ProxyPreserveHost On
ProxyPass /apache-errors !
ErrorDocument 503 /apache-errors/503.html
ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
ProxyPassReverse / http://127.0.0.1:3000/
'';
};
};
}

nixos-modules/meson.build

@@ -1,4 +0,0 @@
install_data('hydra.nix',
install_dir: get_option('datadir') / 'nix',
rename: ['hydra-module.nix'],
)

nixos-tests.nix

@@ -1,306 +0,0 @@
{ forEachSystem, nixpkgs, nixosModules }:
let
# NixOS configuration used for VM tests.
hydraServer =
{ pkgs, ... }:
{
imports = [
nixosModules.hydra
nixosModules.hydraTest
];
virtualisation.memorySize = 1024;
virtualisation.writableStore = true;
environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];
nix = {
# Without this nix tries to fetch packages from the default
# cache.nixos.org which is not reachable from this sandboxed NixOS test.
settings.substituters = [ ];
};
};
in
{
install = forEachSystem (system:
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
name = "hydra-install";
nodes.machine = hydraServer;
testScript =
''
machine.wait_for_job("hydra-init")
machine.wait_for_job("hydra-server")
machine.wait_for_job("hydra-evaluator")
machine.wait_for_job("hydra-queue-runner")
machine.wait_for_open_port(3000)
machine.succeed("curl --fail http://localhost:3000/")
'';
});
notifications = forEachSystem (system:
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
name = "hydra-notifications";
nodes.machine = {
imports = [ hydraServer ];
services.hydra-dev.extraConfig = ''
<influxdb>
url = http://127.0.0.1:8086
db = hydra
</influxdb>
'';
services.influxdb.enable = true;
};
testScript = { nodes, ... }: ''
machine.wait_for_job("hydra-init")
# Create an admin account and some other state.
machine.succeed(
"""
su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
mkdir /run/jobset
chmod 755 /run/jobset
cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
chmod 644 /run/jobset/default.nix
chown -R hydra /run/jobset
"""
)
# Wait until InfluxDB can receive web requests
machine.wait_for_job("influxdb")
machine.wait_for_open_port(8086)
# Create an InfluxDB database where hydra will write to
machine.succeed(
"curl -XPOST 'http://127.0.0.1:8086/query' "
+ "--data-urlencode 'q=CREATE DATABASE hydra'"
)
# Wait until hydra-server can receive HTTP requests
machine.wait_for_job("hydra-server")
machine.wait_for_open_port(3000)
# Setup the project and jobset
machine.succeed(
"su - hydra -c 'perl -I ${nodes.machine.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
)
# Wait until hydra has built the job and
# the InfluxDBNotification plugin uploaded its notification to InfluxDB
machine.wait_until_succeeds(
"curl -s -H 'Accept: application/csv' "
+ "-G 'http://127.0.0.1:8086/query?db=hydra' "
+ "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
)
'';
});
gitea = forEachSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
in
(import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).makeTest {
name = "hydra-gitea";
nodes.machine = { pkgs, ... }: {
imports = [ hydraServer ];
services.hydra-dev.extraConfig = ''
<gitea_authorization>
root=d7f16a3412e01a43a414535b16007c6931d3a9c7
</gitea_authorization>
'';
nixpkgs.config.permittedInsecurePackages = [ "gitea-1.19.4" ];
nix = {
settings.substituters = [ ];
};
services.gitea = {
enable = true;
database.type = "postgres";
settings = {
service.DISABLE_REGISTRATION = true;
server.HTTP_PORT = 3001;
};
};
services.openssh.enable = true;
environment.systemPackages = with pkgs; [ gitea git jq gawk ];
networking.firewall.allowedTCPPorts = [ 3000 ];
};
skipLint = true;
testScript =
let
scripts.mktoken = pkgs.writeText "token.sql" ''
INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight, scope) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7', 'all');
'';
scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
set -x
mkdir -p /tmp/repo $HOME/.ssh
cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
chmod 0400 $HOME/.ssh/privk
git -C /tmp/repo init
cp ${smallDrv} /tmp/repo/jobset.nix
git -C /tmp/repo add .
git config --global user.email test@localhost
git config --global user.name test
git -C /tmp/repo commit -m 'Initial import'
git -C /tmp/repo remote add origin gitea@machine:root/repo
GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
git -C /tmp/repo push origin master
git -C /tmp/repo log >&2
'';
scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
set -x
su -l hydra -c "hydra-create-user root --email-address \
'alice@example.org' --password foobar --role admin"
URL=http://localhost:3000
USERNAME="root"
PASSWORD="foobar"
PROJECT_NAME="trivial"
JOBSET_NAME="trivial"
mycurl() {
curl --referer $URL -H "Accept: application/json" \
-H "Content-Type: application/json" $@
}
cat >data.json <<EOF
{ "username": "$USERNAME", "password": "$PASSWORD" }
EOF
mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt
cat >data.json <<EOF
{
"displayname":"Trivial",
"enabled":"1",
"visible":"1"
}
EOF
mycurl --silent -X PUT $URL/project/$PROJECT_NAME \
-d @data.json -b hydra-cookie.txt
cat >data.json <<EOF
{
"description": "Trivial",
"checkinterval": "60",
"enabled": "1",
"visible": "1",
"keepnr": "1",
"enableemail": true,
"emailoverride": "hydra@localhost",
"type": 0,
"nixexprinput": "git",
"nixexprpath": "jobset.nix",
"inputs": {
"git": {"value": "http://localhost:3001/root/repo.git", "type": "git"},
"gitea_repo_name": {"value": "repo", "type": "string"},
"gitea_repo_owner": {"value": "root", "type": "string"},
"gitea_status_repo": {"value": "git", "type": "string"},
"gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
}
}
EOF
mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME \
-d @data.json -b hydra-cookie.txt
'';
api_token = "d7f16a3412e01a43a414535b16007c6931d3a9c7";
snakeoilKeypair = {
privkey = pkgs.writeText "privkey.snakeoil" ''
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
-----END EC PRIVATE KEY-----
'';
pubkey = pkgs.lib.concatStrings [
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
"yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
"9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= sakeoil"
];
};
smallDrv = pkgs.writeText "jobset.nix" ''
{ trivial = builtins.derivation {
name = "trivial";
system = "${system}";
builder = "/bin/sh";
allowSubstitutes = false;
preferLocalBuild = true;
args = ["-c" "echo success > $out; exit 0"];
};
}
'';
in
''
import json
machine.start()
machine.wait_for_unit("multi-user.target")
machine.wait_for_open_port(3000)
machine.wait_for_open_port(3001)
machine.succeed(
"su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
+ "--username root --password root --email test@localhost'"
)
machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")
machine.succeed(
"curl --fail -X POST http://localhost:3001/api/v1/user/repos "
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
+ ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
)
machine.succeed(
"curl --fail -X POST http://localhost:3001/api/v1/user/keys "
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
+ ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
)
machine.succeed(
"${scripts.git-setup}"
)
machine.succeed(
"${scripts.hydra-setup}"
)
machine.wait_until_succeeds(
'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
+ '| jq .buildstatus | xargs test 0 -eq'
)
data = machine.succeed(
'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+ f"-H 'Authorization: token ${api_token}'"
)
response = json.loads(data)
assert len(response) == 2, "Expected exactly two status updates for latest commit (queued, finished)!"
assert response[0]['status'] == "success", "Expected finished status to be success!"
assert response[1]['status'] == "pending", "Expected queued status to be pending!"
machine.shutdown()
'';
});
validate-openapi = forEachSystem (system:
let pkgs = nixpkgs.legacyPackages.${system}; in
pkgs.runCommand "validate-openapi"
{ buildInputs = [ pkgs.openapi-generator-cli ]; }
''
openapi-generator-cli validate -i ${./hydra-api.yaml}
touch $out
'');
}

View File

@@ -1,285 +0,0 @@
{ stdenv
, lib
, fileset
, rawSrc
, buildEnv
, perlPackages
, nixComponents
, git
, makeWrapper
, meson
, ninja
, nukeReferences
, pkg-config
, mdbook
, unzip
, libpqxx
, top-git
, mercurial
, darcs
, subversion
, breezy
, openssl
, bzip2
, libxslt
, perl
, pixz
, boost
, postgresql_13
, nlohmann_json
, prometheus-cpp
, cacert
, foreman
, glibcLocales
, libressl
, openldap
, python3
, openssh
, coreutils
, findutils
, gzip
, xz
, gnutar
, gnused
, nix-eval-jobs
, rpm
, dpkg
, cdrkit
}:
let
perlDeps = buildEnv {
name = "hydra-perl-deps";
paths = lib.closePropagation
([
nixComponents.nix-perl-bindings
git
] ++ (with perlPackages; [
AuthenSASL
CatalystActionREST
CatalystAuthenticationStoreDBIxClass
CatalystAuthenticationStoreLDAP
CatalystDevel
CatalystPluginAccessLog
CatalystPluginAuthorizationRoles
CatalystPluginCaptcha
CatalystPluginPrometheusTiny
CatalystPluginSessionStateCookie
CatalystPluginSessionStoreFastMmap
CatalystPluginStackTrace
CatalystTraitForRequestProxyBase
CatalystViewDownload
CatalystViewJSON
CatalystViewTT
CatalystXRoleApplicator
CatalystXScriptServerStarman
CryptPassphrase
CryptPassphraseArgon2
CryptRandPasswd
DataDump
DateTime
DBDPg
DBDSQLite
DBIxClassHelpers
DigestSHA1
EmailMIME
EmailSender
FileCopyRecursive
FileLibMagic
FileSlurper
FileWhich
IOCompress
IPCRun
IPCRun3
JSON
JSONMaybeXS
JSONXS
ListSomeUtils
LWP
LWPProtocolHttps
ModulePluggable
NetAmazonS3
NetPrometheus
NetStatsd
NumberBytesHuman
PadWalker
ParallelForkManager
PerlCriticCommunity
PrometheusTinyShared
ReadonlyX
SetScalar
SQLSplitStatement
Starman
StringCompareConstantTime
SysHostnameLong
TermSizeAny
TermReadKey
Test2Harness
TestPostgreSQL
TextDiff
TextTable
UUID4Tiny
YAML
XMLSimple
]));
};
version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (rawSrc.lastModifiedDate or "19700101")}.${rawSrc.shortRev or "DIRTY"}";
in
stdenv.mkDerivation (finalAttrs: {
pname = "hydra";
inherit version;
src = fileset.toSource {
root = ./.;
fileset = fileset.unions ([
./doc
./meson.build
./nixos-modules
./src
./t
./version.txt
./.perlcriticrc
]);
};
outputs = [ "out" "doc" ];
strictDeps = true;
nativeBuildInputs = [
makeWrapper
meson
ninja
nukeReferences
pkg-config
mdbook
nixComponents.nix-cli
perlDeps
perl
unzip
];
buildInputs = [
libpqxx
openssl
libxslt
nixComponents.nix-util
nixComponents.nix-store
nixComponents.nix-main
perlDeps
perl
boost
nlohmann_json
prometheus-cpp
];
nativeCheckInputs = [
bzip2
darcs
foreman
top-git
mercurial
subversion
breezy
openldap
postgresql_13
pixz
nix-eval-jobs
];
checkInputs = [
cacert
glibcLocales
libressl.nc
python3
nixComponents.nix-cli
];
hydraPath = lib.makeBinPath (
[
subversion
openssh
nixComponents.nix-cli
coreutils
findutils
pixz
gzip
bzip2
xz
gnutar
unzip
git
top-git
mercurial
darcs
gnused
breezy
nix-eval-jobs
] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
);
OPENLDAP_ROOT = openldap;
mesonBuildType = "release";
postPatch = ''
patchShebangs .
'';
shellHook = ''
pushd $(git rev-parse --show-toplevel) >/dev/null
PATH=$(pwd)/build/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/build/src/hydra-queue-runner:$PATH
PERL5LIB=$(pwd)/src/lib:$PERL5LIB
export HYDRA_HOME="$(pwd)/src/"
mkdir -p .hydra-data
export HYDRA_DATA="$(pwd)/.hydra-data"
export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'
popd >/dev/null
'';
doCheck = true;
mesonCheckFlags = [ "--verbose" ];
preCheck = ''
export LOGNAME=''${LOGNAME:-foo}
# set $HOME for bzr so it can create its trace file
export HOME=$(mktemp -d)
'';
postInstall = ''
mkdir -p $out/nix-support
for i in $out/bin/*; do
read -n 4 chars < $i
if [[ $chars =~ ELF ]]; then continue; fi
wrapProgram $i \
--prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
--prefix PATH ':' $out/bin:$hydraPath \
--set HYDRA_RELEASE ${version} \
--set HYDRA_HOME $out/libexec/hydra \
--set NIX_RELEASE ${nixComponents.nix-cli.name or "unknown"} \
--set NIX_EVAL_JOBS_RELEASE ${nix-eval-jobs.name or "unknown"}
done
'';
dontStrip = true;
meta.description = "Build of Hydra on ${stdenv.system}";
passthru = {
inherit perlDeps;
nix = nixComponents.nix-cli;
};
})

src/Makefile.am Normal file
View File

@@ -0,0 +1,3 @@
SUBDIRS = hydra-evaluator hydra-eval-jobs hydra-queue-runner sql script lib root ttf
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)

View File

@@ -0,0 +1,5 @@
bin_PROGRAMS = hydra-eval-jobs
hydra_eval_jobs_SOURCES = hydra-eval-jobs.cc
hydra_eval_jobs_LDADD = $(NIX_LIBS) -lnixcmd
hydra_eval_jobs_CXXFLAGS = $(NIX_CFLAGS) -I ../libhydra

View File

@@ -0,0 +1,558 @@
#include <iostream>
#include <thread>
#include <optional>
#include <unordered_map>
#include "shared.hh"
#include "store-api.hh"
#include "eval.hh"
#include "eval-inline.hh"
#include "util.hh"
#include "get-drvs.hh"
#include "globals.hh"
#include "common-eval-args.hh"
#include "flake/flakeref.hh"
#include "flake/flake.hh"
#include "attr-path.hh"
#include "derivations.hh"
#include "local-fs-store.hh"
#include "hydra-config.hh"
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/resource.h>
#include <nlohmann/json.hpp>
void check_pid_status_nonblocking(pid_t check_pid) {
// Only check 'initialized' and known PID's
if (check_pid <= 0) { return; }
int wstatus = 0;
pid_t pid = waitpid(check_pid, &wstatus, WNOHANG);
// -1 = failure, WNOHANG: 0 = no change
if (pid <= 0) { return; }
std::cerr << "child process (" << pid << ") ";
if (WIFEXITED(wstatus)) {
std::cerr << "exited with status=" << WEXITSTATUS(wstatus) << std::endl;
} else if (WIFSIGNALED(wstatus)) {
std::cerr << "killed by signal=" << WTERMSIG(wstatus) << std::endl;
} else if (WIFSTOPPED(wstatus)) {
std::cerr << "stopped by signal=" << WSTOPSIG(wstatus) << std::endl;
} else if (WIFCONTINUED(wstatus)) {
std::cerr << "continued" << std::endl;
}
}
using namespace nix;
static Path gcRootsDir;
static size_t maxMemorySize;
struct MyArgs : MixEvalArgs, MixCommonArgs
{
Path releaseExpr;
bool flake = false;
bool dryRun = false;
MyArgs() : MixCommonArgs("hydra-eval-jobs")
{
addFlag({
.longName = "gc-roots-dir",
.description = "garbage collector roots directory",
.labels = {"path"},
.handler = {&gcRootsDir}
});
addFlag({
.longName = "dry-run",
.description = "don't create store derivations",
.handler = {&dryRun, true}
});
addFlag({
.longName = "flake",
.description = "build a flake",
.handler = {&flake, true}
});
expectArg("expr", &releaseExpr);
}
};
static MyArgs myArgs;
static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std::string & name, const std::string & subAttribute)
{
Strings res;
std::function<void(Value & v)> rec;
rec = [&](Value & v) {
state.forceValue(v, noPos);
if (v.type() == nString)
res.push_back(v.string.s);
else if (v.isList())
for (unsigned int n = 0; n < v.listSize(); ++n)
rec(*v.listElems()[n]);
else if (v.type() == nAttrs) {
auto a = v.attrs->find(state.symbols.create(subAttribute));
if (a != v.attrs->end())
res.push_back(std::string(state.forceString(*a->value)));
}
};
Value * v = drv.queryMeta(name);
if (v) rec(*v);
return concatStringsSep(", ", res);
}
static void worker(
EvalState & state,
Bindings & autoArgs,
AutoCloseFD & to,
AutoCloseFD & from)
{
Value vTop;
if (myArgs.flake) {
using namespace flake;
auto flakeRef = parseFlakeRef(myArgs.releaseExpr);
auto vFlake = state.allocValue();
auto lockedFlake = lockFlake(state, flakeRef,
LockFlags {
.updateLockFile = false,
.useRegistries = false,
.allowUnlocked = false,
});
callFlake(state, lockedFlake, *vFlake);
auto vOutputs = vFlake->attrs->get(state.symbols.create("outputs"))->value;
state.forceValue(*vOutputs, noPos);
auto aHydraJobs = vOutputs->attrs->get(state.symbols.create("hydraJobs"));
if (!aHydraJobs)
aHydraJobs = vOutputs->attrs->get(state.symbols.create("checks"));
if (!aHydraJobs)
throw Error("flake '%s' does not provide any Hydra jobs or checks", flakeRef);
vTop = *aHydraJobs->value;
} else {
state.evalFile(lookupFileArg(state, myArgs.releaseExpr), vTop);
}
auto vRoot = state.allocValue();
state.autoCallFunction(autoArgs, vTop, *vRoot);
while (true) {
/* Wait for the master to send us a job name. */
writeLine(to.get(), "next");
auto s = readLine(from.get());
if (s == "exit") break;
if (!hasPrefix(s, "do ")) abort();
std::string attrPath(s, 3);
debug("worker process %d at '%s'", getpid(), attrPath);
/* Evaluate it and send info back to the master. */
nlohmann::json reply;
try {
auto vTmp = findAlongAttrPath(state, attrPath, autoArgs, *vRoot).first;
auto v = state.allocValue();
state.autoCallFunction(autoArgs, *vTmp, *v);
if (auto drv = getDerivation(state, *v, false)) {
DrvInfo::Outputs outputs = drv->queryOutputs();
if (drv->querySystem() == "unknown")
throw EvalError("derivation must have a 'system' attribute");
auto drvPath = state.store->printStorePath(drv->requireDrvPath());
nlohmann::json job;
job["nixName"] = drv->queryName();
job["system"] =drv->querySystem();
job["drvPath"] = drvPath;
job["description"] = drv->queryMetaString("description");
job["license"] = queryMetaStrings(state, *drv, "license", "shortName");
job["homepage"] = drv->queryMetaString("homepage");
job["maintainers"] = queryMetaStrings(state, *drv, "maintainers", "email");
job["schedulingPriority"] = drv->queryMetaInt("schedulingPriority", 100);
job["timeout"] = drv->queryMetaInt("timeout", 36000);
job["maxSilent"] = drv->queryMetaInt("maxSilent", 7200);
job["isChannel"] = drv->queryMetaBool("isHydraChannel", false);
/* If this is an aggregate, then get its constituents. */
auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
if (a && state.forceBool(*a->value, a->pos)) {
auto a = v->attrs->get(state.symbols.create("constituents"));
if (!a)
throw EvalError("derivation must have a constituents attribute");
PathSet context;
state.coerceToString(a->pos, *a->value, context, true, false);
for (auto & i : context)
if (i.at(0) == '!') {
size_t index = i.find("!", 1);
job["constituents"].push_back(std::string(i, index + 1));
}
state.forceList(*a->value, a->pos);
for (unsigned int n = 0; n < a->value->listSize(); ++n) {
auto v = a->value->listElems()[n];
state.forceValue(*v, noPos);
if (v->type() == nString)
job["namedConstituents"].push_back(state.forceStringNoCtx(*v));
}
}
/* Register the derivation as a GC root. !!! This
registers roots for jobs that we may have already
done. */
auto localStore = state.store.dynamic_pointer_cast<LocalFSStore>();
if (gcRootsDir != "" && localStore) {
Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath));
if (!pathExists(root))
localStore->addPermRoot(localStore->parseStorePath(drvPath), root);
}
nlohmann::json out;
for (auto & j : outputs)
// FIXME: handle CA/impure builds.
if (j.second)
out[j.first] = state.store->printStorePath(*j.second);
job["outputs"] = std::move(out);
reply["job"] = std::move(job);
}
else if (v->type() == nAttrs) {
auto attrs = nlohmann::json::array();
StringSet ss;
for (auto & i : v->attrs->lexicographicOrder(state.symbols)) {
std::string name(state.symbols[i->name]);
if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) {
printError("skipping job with illegal name '%s'", name);
continue;
}
attrs.push_back(name);
}
reply["attrs"] = std::move(attrs);
}
else if (v->type() == nNull)
;
else throw TypeError("attribute '%s' is %s, which is not supported", attrPath, showType(*v));
} catch (EvalError & e) {
auto msg = e.msg();
// Transmits the error we got from the previous evaluation
// in the JSON output.
reply["error"] = filterANSIEscapes(msg, true);
// Don't forget to print it into the STDERR log, this is
// what's shown in the Hydra UI.
printError(msg);
}
writeLine(to.get(), reply.dump());
/* If our RSS exceeds the maximum, exit. The master will
start a new process. */
struct rusage r;
getrusage(RUSAGE_SELF, &r);
if ((size_t) r.ru_maxrss > maxMemorySize * 1024) break;
}
writeLine(to.get(), "restart");
}
int main(int argc, char * * argv)
{
/* Prevent undeclared dependencies in the evaluation via
$NIX_PATH. */
unsetenv("NIX_PATH");
return handleExceptions(argv[0], [&]() {
auto config = std::make_unique<HydraConfig>();
auto nrWorkers = config->getIntOption("evaluator_workers", 1);
maxMemorySize = config->getIntOption("evaluator_max_memory_size", 4096);
initNix();
initGC();
myArgs.parseCmdline(argvToStrings(argc, argv));
auto pureEval = config->getBoolOption("evaluator_pure_eval", myArgs.flake);
/* FIXME: The build hook in conjunction with import-from-derivation is causing "unexpected EOF" during eval */
settings.builders = "";
/* Prevent access to paths outside of the Nix search path and
to the environment. */
evalSettings.restrictEval = true;
/* When building a flake, use pure evaluation (no access to
'getEnv', 'currentSystem', etc.). */
evalSettings.pureEval = pureEval;
if (myArgs.dryRun) settings.readOnlyMode = true;
if (myArgs.releaseExpr == "") throw UsageError("no expression specified");
if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified");
struct State
{
std::set<std::string> todo{""};
std::set<std::string> active;
nlohmann::json jobs;
std::exception_ptr exc;
};
std::condition_variable wakeup;
Sync<State> state_;
/* Start a handler thread per worker process. */
auto handler = [&]()
{
pid_t pid = -1;
try {
AutoCloseFD from, to;
while (true) {
/* Start a new worker process if necessary. */
if (pid == -1) {
Pipe toPipe, fromPipe;
toPipe.create();
fromPipe.create();
pid = startProcess(
[&,
to{std::make_shared<AutoCloseFD>(std::move(fromPipe.writeSide))},
from{std::make_shared<AutoCloseFD>(std::move(toPipe.readSide))}
]()
{
try {
EvalState state(myArgs.searchPath, openStore());
Bindings & autoArgs = *myArgs.getAutoArgs(state);
worker(state, autoArgs, *to, *from);
} catch (Error & e) {
nlohmann::json err;
auto msg = e.msg();
err["error"] = filterANSIEscapes(msg, true);
printError(msg);
writeLine(to->get(), err.dump());
// Don't forget to print it into the STDERR log, this is
// what's shown in the Hydra UI.
writeLine(to->get(), "restart");
}
},
ProcessOptions { .allowVfork = false });
from = std::move(fromPipe.readSide);
to = std::move(toPipe.writeSide);
debug("created worker process %d", pid);
}
/* Check whether the existing worker process is still there. */
auto s = readLine(from.get());
if (s == "restart") {
pid = -1;
continue;
} else if (s != "next") {
auto json = nlohmann::json::parse(s);
throw Error("worker error: %s", (std::string) json["error"]);
}
/* Wait for a job name to become available. */
std::string attrPath;
while (true) {
checkInterrupt();
auto state(state_.lock());
if ((state->todo.empty() && state->active.empty()) || state->exc) {
writeLine(to.get(), "exit");
return;
}
if (!state->todo.empty()) {
attrPath = *state->todo.begin();
state->todo.erase(state->todo.begin());
state->active.insert(attrPath);
break;
} else
state.wait(wakeup);
}
/* Tell the worker to evaluate it. */
writeLine(to.get(), "do " + attrPath);
/* Wait for the response. */
auto response = nlohmann::json::parse(readLine(from.get()));
/* Handle the response. */
StringSet newAttrs;
if (response.find("job") != response.end()) {
auto state(state_.lock());
state->jobs[attrPath] = response["job"];
}
if (response.find("attrs") != response.end()) {
for (auto & i : response["attrs"]) {
auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i;
newAttrs.insert(s);
}
}
if (response.find("error") != response.end()) {
auto state(state_.lock());
state->jobs[attrPath]["error"] = response["error"];
}
/* Add newly discovered job names to the queue. */
{
auto state(state_.lock());
state->active.erase(attrPath);
for (auto & s : newAttrs)
state->todo.insert(s);
wakeup.notify_all();
}
}
} catch (...) {
check_pid_status_nonblocking(pid);
auto state(state_.lock());
state->exc = std::current_exception();
wakeup.notify_all();
}
};
std::vector<std::thread> threads;
for (size_t i = 0; i < nrWorkers; i++)
threads.emplace_back(std::thread(handler));
for (auto & thread : threads)
thread.join();
auto state(state_.lock());
if (state->exc)
std::rethrow_exception(state->exc);
/* For aggregate jobs that have named constituents
(i.e. constituents that are a job name rather than a
derivation), look up the referenced job and add it to the
dependencies of the aggregate derivation. */
auto store = openStore();
for (auto i = state->jobs.begin(); i != state->jobs.end(); ++i) {
auto jobName = i.key();
auto & job = i.value();
auto named = job.find("namedConstituents");
if (named == job.end()) continue;
std::unordered_map<std::string, std::string> brokenJobs;
auto getNonBrokenJobOrRecordError = [&brokenJobs, &jobName, &state](
const std::string & childJobName) -> std::optional<nlohmann::json> {
auto childJob = state->jobs.find(childJobName);
if (childJob == state->jobs.end()) {
printError("aggregate job '%s' references non-existent job '%s'", jobName, childJobName);
brokenJobs[childJobName] = "does not exist";
return std::nullopt;
}
if (childJob->find("error") != childJob->end()) {
std::string error = (*childJob)["error"];
printError("aggregate job '%s' references broken job '%s': %s", jobName, childJobName, error);
brokenJobs[childJobName] = error;
return std::nullopt;
}
return *childJob;
};
if (myArgs.dryRun) {
for (std::string jobName2 : *named) {
auto job2 = getNonBrokenJobOrRecordError(jobName2);
if (!job2) {
continue;
}
std::string drvPath2 = (*job2)["drvPath"];
job["constituents"].push_back(drvPath2);
}
} else {
auto drvPath = store->parseStorePath((std::string) job["drvPath"]);
auto drv = store->readDerivation(drvPath);
for (std::string jobName2 : *named) {
auto job2 = getNonBrokenJobOrRecordError(jobName2);
if (!job2) {
continue;
}
auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]);
auto drv2 = store->readDerivation(drvPath2);
job["constituents"].push_back(store->printStorePath(drvPath2));
drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first};
}
if (brokenJobs.empty()) {
std::string drvName(drvPath.name());
assert(hasSuffix(drvName, drvExtension));
drvName.resize(drvName.size() - drvExtension.size());
auto hashModulo = hashDerivationModulo(*store, drv, true);
if (hashModulo.kind != DrvHash::Kind::Regular) continue;
auto h = hashModulo.hashes.find("out");
if (h == hashModulo.hashes.end()) continue;
auto outPath = store->makeOutputPath("out", h->second, drvName);
drv.env["out"] = store->printStorePath(outPath);
drv.outputs.insert_or_assign("out", DerivationOutput::InputAddressed { .path = outPath });
auto newDrvPath = store->printStorePath(writeDerivation(*store, drv));
debug("rewrote aggregate derivation %s -> %s", store->printStorePath(drvPath), newDrvPath);
job["drvPath"] = newDrvPath;
job["outputs"]["out"] = store->printStorePath(outPath);
}
}
job.erase("namedConstituents");
/* Register the derivation as a GC root. !!! This
registers roots for jobs that we may have already
done. */
auto localStore = store.dynamic_pointer_cast<LocalFSStore>();
if (gcRootsDir != "" && localStore) {
auto drvPath = job["drvPath"].get<std::string>();
Path root = gcRootsDir + "/" + std::string(baseNameOf(drvPath));
if (!pathExists(root))
localStore->addPermRoot(localStore->parseStorePath(drvPath), root);
}
if (!brokenJobs.empty()) {
std::stringstream ss;
for (const auto& [jobName, error] : brokenJobs) {
ss << jobName << ": " << error << "\n";
}
job["error"] = ss.str();
}
}
std::cout << state->jobs.dump(2) << "\n";
});
}
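The eval workers above are deliberately disposable: after each job the worker compares its peak RSS against evaluator_max_memory_size and, if over the limit, writes "restart" and exits, letting the handler thread fork a fresh process. A minimal standalone sketch of that self-check (hypothetical helper name; note ru_maxrss is reported in KiB on Linux):

#include <sys/resource.h>
#include <cstddef>

// True once this process's peak resident set exceeds maxMiB mebibytes.
static bool rssExceeds(std::size_t maxMiB)
{
    struct rusage r;
    getrusage(RUSAGE_SELF, &r);
    return static_cast<std::size_t>(r.ru_maxrss) > maxMiB * 1024;
}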

View File

@@ -0,0 +1,5 @@
bin_PROGRAMS = hydra-evaluator
hydra_evaluator_SOURCES = hydra-evaluator.cc
hydra_evaluator_LDADD = $(NIX_LIBS) -lpqxx
hydra_evaluator_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations

View File

@@ -1,8 +1,7 @@
#include "db.hh"
#include "hydra-config.hh"
#include <nix/util/pool.hh>
#include <nix/main/shared.hh>
#include <nix/util/signals.hh>
#include "pool.hh"
#include "shared.hh"
#include <algorithm>
#include <thread>
@@ -38,7 +37,7 @@ class JobsetId {
friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);
std::string display() const {
return boost::str(boost::format("%1%:%2% (jobset#%3%)") % project % jobset % id);
return str(format("%1%:%2% (jobset#%3%)") % project % jobset % id);
}
};
bool operator==(const JobsetId & lhs, const JobsetId & rhs)
@@ -180,8 +179,10 @@ struct Evaluator
{
auto conn(dbPool.get());
pqxx::work txn(*conn);
txn.exec("update Jobsets set startTime = $1 where id = $2",
pqxx::params{now, jobset.name.id}).no_rows();
txn.exec_params0
("update Jobsets set startTime = $1 where id = $2",
now,
jobset.name.id);
txn.commit();
}
@@ -232,7 +233,7 @@ struct Evaluator
pqxx::work txn(*conn);
if (jobset.evaluation_style == EvaluationStyle::ONE_AT_A_TIME) {
auto evaluation_res = txn.exec
auto evaluation_res = txn.exec_params
("select id from JobsetEvals "
"where jobset_id = $1 "
"order by id desc limit 1"
@@ -248,7 +249,7 @@ struct Evaluator
auto evaluation_id = evaluation_res[0][0].as<int>();
auto unfinished_build_res = txn.exec
auto unfinished_build_res = txn.exec_params
("select id from Builds "
"join JobsetEvalMembers "
" on (JobsetEvalMembers.build = Builds.id) "
@@ -365,9 +366,6 @@ struct Evaluator
printInfo("received jobset event");
}
} catch (pqxx::broken_connection & e) {
printError("Database connection broken: %s", e.what());
std::_Exit(1);
} catch (std::exception & e) {
printError("exception in database monitor thread: %s", e.what());
sleep(30);
@@ -418,18 +416,21 @@ struct Evaluator
/* Clear the trigger time to prevent this
jobset from getting stuck in an endless
failing eval loop. */
txn.exec
txn.exec_params0
("update Jobsets set triggerTime = null where id = $1 and startTime is not null and triggerTime <= startTime",
jobset.name.id).no_rows();
jobset.name.id);
/* Clear the start time. */
txn.exec
txn.exec_params0
("update Jobsets set startTime = null where id = $1",
jobset.name.id).no_rows();
jobset.name.id);
if (!WIFEXITED(status) || WEXITSTATUS(status) > 1) {
txn.exec("update Jobsets set errorMsg = $1, lastCheckedTime = $2, errorTime = $2, fetchErrorMsg = null where id = $3",
pqxx::params{fmt("evaluation %s", statusToString(status)), now, jobset.name.id}).no_rows();
txn.exec_params0
("update Jobsets set errorMsg = $1, lastCheckedTime = $2, errorTime = $2, fetchErrorMsg = null where id = $3",
fmt("evaluation %s", statusToString(status)),
now,
jobset.name.id);
}
txn.commit();
@@ -454,7 +455,7 @@ struct Evaluator
{
auto conn(dbPool.get());
pqxx::work txn(*conn);
txn.exec("update Jobsets set startTime = null").no_rows();
txn.exec("update Jobsets set startTime = null");
txn.commit();
}
@@ -472,9 +473,6 @@ struct Evaluator
while (true) {
try {
loop();
} catch (pqxx::broken_connection & e) {
printError("Database connection broken: %s", e.what());
std::_Exit(1);
} catch (std::exception & e) {
printError("exception in main loop: %s", e.what());
sleep(30);

View File

@@ -1,10 +0,0 @@
hydra_evaluator = executable('hydra-evaluator',
'hydra-evaluator.cc',
dependencies: [
libhydra_dep,
nix_util_dep,
nix_main_dep,
pqxx_dep,
],
install: true,
)

View File

@@ -0,0 +1,8 @@
bin_PROGRAMS = hydra-queue-runner
hydra_queue_runner_SOURCES = hydra-queue-runner.cc queue-monitor.cc dispatcher.cc \
builder.cc build-result.cc build-remote.cc \
hydra-build-result.hh counter.hh state.hh db.hh \
nar-extractor.cc nar-extractor.hh
hydra_queue_runner_LDADD = $(NIX_LIBS) -lpqxx -lprometheus-cpp-pull -lprometheus-cpp-core
hydra_queue_runner_CXXFLAGS = $(NIX_CFLAGS) -Wall -I ../libhydra -Wno-deprecated-declarations

View File

@@ -5,78 +5,107 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <nix/store/build-result.hh>
#include <nix/store/path.hh>
#include <nix/store/legacy-ssh-store.hh>
#include <nix/store/serve-protocol.hh>
#include <nix/store/serve-protocol-impl.hh>
#include "build-result.hh"
#include "serve-protocol.hh"
#include "state.hh"
#include <nix/util/current-process.hh>
#include <nix/util/processes.hh>
#include <nix/util/util.hh>
#include <nix/store/export-import.hh>
#include <nix/store/serve-protocol.hh>
#include <nix/store/serve-protocol-impl.hh>
#include <nix/store/ssh.hh>
#include <nix/util/finally.hh>
#include <nix/util/url.hh>
#include "util.hh"
#include "worker-protocol.hh"
#include "finally.hh"
#include "url.hh"
using namespace nix;
bool ::Machine::isLocalhost() const
struct Child
{
return storeUri.params.empty() && std::visit(overloaded {
[](const StoreReference::Auto &) {
return true;
},
[](const StoreReference::Specified & s) {
return
(s.scheme == "local" || s.scheme == "unix") ||
((s.scheme == "ssh" || s.scheme == "ssh-ng") &&
s.authority == "localhost");
},
}, storeUri.variant);
Pid pid;
AutoCloseFD to, from;
};
static void append(Strings & dst, const Strings & src)
{
dst.insert(dst.end(), src.begin(), src.end());
}
namespace nix::build_remote {
static std::unique_ptr<SSHMaster::Connection> openConnection(
::Machine::ptr machine, SSHMaster & master)
static Strings extraStoreArgs(std::string & machine)
{
Strings command = {"nix-store", "--serve", "--write"};
if (machine->isLocalhost()) {
command.push_back("--builders");
command.push_back("");
} else {
auto remoteStore = machine->storeUri.params.find("remote-store");
if (remoteStore != machine->storeUri.params.end()) {
command.push_back("--store");
command.push_back(escapeShellArgAlways(remoteStore->second));
Strings result;
try {
auto parsed = parseURL(machine);
if (parsed.scheme != "ssh") {
throw SysError("Currently, only (legacy-)ssh stores are supported!");
}
machine = parsed.authority.value_or("");
auto remoteStore = parsed.query.find("remote-store");
if (remoteStore != parsed.query.end()) {
result = {"--store", shellEscape(remoteStore->second)};
}
} catch (BadURL &) {
// We just try to continue with `machine->sshName` here for backwards compat.
}
auto ret = master.startCommand(std::move(command), {
"-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
return result;
}
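// Illustrative behaviour (hypothetical values): given
//   machine = "ssh://mac1?remote-store=/home/alice/nix"
// `machine` is rewritten to "mac1" and the result is
//   {"--store", "'/home/alice/nix'"}
// which is appended to the remote nix-store --serve invocation.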
static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Child & child)
{
std::string pgmName;
Pipe to, from;
to.create();
from.create();
Strings argv;
if (machine->isLocalhost()) {
pgmName = "nix-store";
argv = {"nix-store", "--builders", "", "--serve", "--write"};
} else {
pgmName = "ssh";
auto sshName = machine->sshName;
Strings extraArgs = extraStoreArgs(sshName);
argv = {"ssh", sshName};
if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
if (machine->sshPublicHostKey != "") {
Path fileName = tmpDir + "/host-key";
auto p = machine->sshName.find("@");
std::string host = p != std::string::npos ? std::string(machine->sshName, p + 1) : machine->sshName;
writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
append(argv, {"-oUserKnownHostsFile=" + fileName});
}
append(argv,
{ "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
, "--", "nix-store", "--serve", "--write" });
append(argv, extraArgs);
}
child.pid = startProcess([&]() {
restoreProcessContext();
if (dup2(to.readSide.get(), STDIN_FILENO) == -1)
throw SysError("cannot dup input pipe to stdin");
if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1)
throw SysError("cannot dup output pipe to stdout");
if (dup2(stderrFD, STDERR_FILENO) == -1)
throw SysError("cannot dup stderr");
execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast
throw SysError("cannot start %s", pgmName);
});
// XXX: determine the actual max value we can use from /proc.
to.readSide = -1;
from.writeSide = -1;
// FIXME: Should this be upstreamed into `startCommand` in Nix?
int pipesize = 1024 * 1024;
fcntl(ret->in.get(), F_SETPIPE_SZ, &pipesize);
fcntl(ret->out.get(), F_SETPIPE_SZ, &pipesize);
return ret;
child.to = to.writeSide.release();
child.from = from.readSide.release();
}
static void copyClosureTo(
::Machine::Connection & conn,
Store & destStore,
const StorePathSet & paths,
SubstituteFlag useSubstitutes = NoSubstitute)
static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
FdSource & from, FdSink & to, const StorePathSet & paths,
bool useSubstitutes = false)
{
StorePathSet closure;
destStore.computeFSClosure(paths, closure);
@@ -86,10 +115,13 @@ static void copyClosureTo(
garbage-collect paths that are already there. Optionally, ask
the remote host to substitute missing paths. */
// FIXME: substitute output pollutes our build log
to << cmdQueryValidPaths << 1 << useSubstitutes;
worker_proto::write(destStore, to, closure);
to.flush();
/* Get back the set of paths that are already valid on the remote
host. */
auto present = conn.queryValidPaths(
destStore, true, closure, useSubstitutes);
auto present = worker_proto::read(destStore, from, Phantom<StorePathSet> {});
if (present.size() == closure.size()) return;
@@ -101,20 +133,20 @@ static void copyClosureTo(
printMsg(lvlDebug, "sending %d missing paths", missing.size());
std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
std::unique_lock<std::timed_mutex> sendLock(sendMutex,
std::chrono::seconds(600));
conn.to << ServeProto::Command::ImportPaths;
exportPaths(destStore, missing, conn.to);
conn.to.flush();
to << cmdImportPaths;
destStore.exportPaths(missing, to);
to.flush();
if (readInt(conn.from) != 1)
if (readInt(from) != 1)
throw Error("remote machine failed to import closure");
}
// FIXME: use Store::topoSortPaths().
static StorePaths reverseTopoSortPaths(const std::map<StorePath, UnkeyedValidPathInfo> & paths)
StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths)
{
StorePaths sorted;
StorePathSet visited;
@@ -142,320 +174,40 @@ static StorePaths reverseTopoSortPaths(const std::map<StorePath, UnkeyedValidPat
return sorted;
}
static std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
{
std::string base(drvPath.to_string());
auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
createDirs(dirOf(logFile));
AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
if (!logFD) throw SysError("creating log file %s", logFile);
return {std::move(logFile), std::move(logFD)};
}
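// Build logs are sharded on the first two characters of the derivation's
// base name, e.g. <logDir>/q3/94c...-hello.drv (hypothetical path), to
// avoid one huge flat directory.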
static BasicDerivation sendInputs(
State & state,
Step & step,
Store & localStore,
Store & destStore,
::Machine::Connection & conn,
unsigned int & overhead,
counter & nrStepsWaiting,
counter & nrStepsCopyingTo
)
{
/* Replace the input derivations by their output paths to send a
minimal closure to the builder.
`tryResolve` currently does *not* rewrite input addresses, so it
is safe to do this in all cases. (It should probably have a mode
to do that, however, but we would not use it here.)
*/
BasicDerivation basicDrv = ({
auto maybeBasicDrv = step.drv->tryResolve(destStore, &localStore);
if (!maybeBasicDrv)
throw Error(
"the derivation '%s' cant be resolved. Its probably "
"missing some outputs",
localStore.printStorePath(step.drvPath));
*maybeBasicDrv;
});
/* Ensure that the inputs exist in the destination store. This is
a no-op for regular stores, but for the binary cache store,
this will copy the inputs to the binary cache from the local
store. */
if (&localStore != &destStore) {
copyClosure(localStore, destStore,
step.drv->inputSrcs,
NoRepair, NoCheckSigs, NoSubstitute);
}
{
auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
mc1.reset();
MaintainCount<counter> mc2(nrStepsCopyingTo);
printMsg(lvlDebug, "sending closure of %s to %s",
localStore.printStorePath(step.drvPath), conn.machine->storeUri.render());
auto now1 = std::chrono::steady_clock::now();
/* Copy the input closure. */
if (conn.machine->isLocalhost()) {
StorePathSet closure;
destStore.computeFSClosure(basicDrv.inputSrcs, closure);
copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
} else {
copyClosureTo(conn, destStore, basicDrv.inputSrcs, Substitute);
}
auto now2 = std::chrono::steady_clock::now();
overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
}
return basicDrv;
}
static BuildResult performBuild(
::Machine::Connection & conn,
Store & localStore,
StorePath drvPath,
const BasicDerivation & drv,
const ServeProto::BuildOptions & options,
counter & nrStepsBuilding
)
{
conn.putBuildDerivationRequest(localStore, drvPath, drv, options);
BuildResult result;
time_t startTime, stopTime;
startTime = time(0);
{
MaintainCount<counter> mc(nrStepsBuilding);
result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
}
stopTime = time(0);
if (!result.startTime) {
// If the builder gave `startTime = 0`, use our measurements
// instead of the builder's.
//
// Note: this represents the duration of a single round, rather
// than all rounds.
result.startTime = startTime;
result.stopTime = stopTime;
}
// If the protocol was too old to give us `builtOutputs`, initialize
// it manually by introspecting the derivation.
if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
{
// If the remote is too old to handle CA derivations, we can't get this
// far anyways
assert(drv.type().hasKnownOutputPaths());
DerivationOutputsAndOptPaths drvOutputs = drv.outputsAndOptPaths(localStore);
// Since this a `BasicDerivation`, `staticOutputHashes` will not
// do any real work.
auto outputHashes = staticOutputHashes(localStore, drv);
if (auto * successP = result.tryGetSuccess()) {
for (auto & [outputName, output] : drvOutputs) {
auto outputPath = output.second;
// We've just asserted that the output paths of the derivation
// were known
assert(outputPath);
auto outputHash = outputHashes.at(outputName);
auto drvOutput = DrvOutput { outputHash, outputName };
successP->builtOutputs.insert_or_assign(
std::move(outputName),
Realisation { drvOutput, *outputPath });
}
}
}
return result;
}
static void copyPathFromRemote(
::Machine::Connection & conn,
NarMemberDatas & narMembers,
Store & localStore,
Store & destStore,
const ValidPathInfo & info
)
{
/* Receive the NAR from the remote and add it to the
destination store. Meanwhile, extract all the info from the
NAR that getBuildOutput() needs. */
auto source2 = sinkToSource([&](Sink & sink)
{
/* Note: we should only send the command to dump the store
path to the remote if the NAR is actually going to get read
by the destination store, which won't happen if this path
is already valid on the destination store. Since this
lambda function only gets executed if someone tries to read
from source2, we will send the command from here rather
than outside the lambda. */
conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
conn.to.flush();
TeeSource tee(conn.from, sink);
extractNarData(tee, localStore.printStorePath(info.path), narMembers);
});
destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
}
static void copyPathsFromRemote(
::Machine::Connection & conn,
NarMemberDatas & narMembers,
Store & localStore,
Store & destStore,
const std::map<StorePath, UnkeyedValidPathInfo> & infos
)
{
auto pathsSorted = reverseTopoSortPaths(infos);
for (auto & path : pathsSorted) {
auto & info = infos.find(path)->second;
copyPathFromRemote(
conn, narMembers, localStore, destStore,
ValidPathInfo { path, info });
}
}
}
/* using namespace nix::build_remote; */
void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
{
startTime = buildResult.startTime;
stopTime = buildResult.stopTime;
timesBuilt = buildResult.timesBuilt;
std::visit(overloaded{
[&](const BuildResult::Success & success) {
stepStatus = bsSuccess;
switch (success.status) {
case BuildResult::Success::Built:
break;
case BuildResult::Success::Substituted:
case BuildResult::Success::AlreadyValid:
case BuildResult::Success::ResolvesToAlreadyValid:
isCached = true;
break;
default:
assert(false);
}
},
[&](const BuildResult::Failure & failure) {
errorMsg = failure.errorMsg;
isNonDeterministic = failure.isNonDeterministic;
switch (failure.status) {
case BuildResult::Failure::PermanentFailure:
stepStatus = bsFailed;
canCache = true;
errorMsg = "";
break;
case BuildResult::Failure::InputRejected:
case BuildResult::Failure::OutputRejected:
stepStatus = bsFailed;
canCache = true;
break;
case BuildResult::Failure::TransientFailure:
stepStatus = bsFailed;
canRetry = true;
errorMsg = "";
break;
case BuildResult::Failure::TimedOut:
stepStatus = bsTimedOut;
errorMsg = "";
break;
case BuildResult::Failure::MiscFailure:
stepStatus = bsAborted;
canRetry = true;
break;
case BuildResult::Failure::LogLimitExceeded:
stepStatus = bsLogLimitExceeded;
break;
case BuildResult::Failure::NotDeterministic:
stepStatus = bsNotDeterministic;
canRetry = false;
canCache = true;
break;
case BuildResult::Failure::CachedFailure:
case BuildResult::Failure::DependencyFailed:
case BuildResult::Failure::NoSubstituters:
case BuildResult::Failure::HashMismatch:
stepStatus = bsAborted;
break;
default:
assert(false);
}
},
}, buildResult.inner);
}
/* Utility guard object to auto-release a semaphore on destruction. */
template <typename T>
class SemaphoreReleaser {
public:
SemaphoreReleaser(T* s) : sem(s) {}
~SemaphoreReleaser() { sem->release(); }
private:
T* sem;
};
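/* Minimal usage sketch (assumes a std::counting_semaphore-like object;
   mirrors the localWorkThrottler use below):

       throttler.acquire();
       SemaphoreReleaser releaser(&throttler); // release() runs on every exit path
*/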
void State::buildRemote(ref<Store> destStore,
std::unique_ptr<MachineReservation> reservation,
::Machine::ptr machine, Step::ptr step,
const ServeProto::BuildOptions & buildOptions,
Machine::ptr machine, Step::ptr step,
unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats,
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
std::function<void(StepState)> updateStep,
NarMemberDatas & narMembers)
{
assert(BuildResult::Failure::TimedOut == 8);
assert(BuildResult::TimedOut == 8);
auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath);
AutoDelete logFileDel(logFile, false);
result.logFile = logFile;
std::string base(step->drvPath.to_string());
result.logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
AutoDelete autoDelete(result.logFile, false);
createDirs(dirOf(result.logFile));
AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
if (!logFD) throw SysError("creating log file %s", result.logFile);
nix::Path tmpDir = createTempDir();
AutoDelete tmpDirDel(tmpDir, true);
try {
updateStep(ssConnecting);
auto storeRef = machine->completeStoreReference();
auto * pSpecified = std::get_if<StoreReference::Specified>(&storeRef.variant);
if (!pSpecified || pSpecified->scheme != "ssh") {
throw Error("Currently, only (legacy-)ssh stores are supported!");
}
LegacySSHStoreConfig storeConfig {
pSpecified->scheme,
pSpecified->authority,
storeRef.params
};
auto master = storeConfig.createSSHMaster(
false, // no SSH master yet
logFD.get());
// FIXME: rewrite to use Store.
auto child = build_remote::openConnection(machine, master);
Child child;
openConnection(machine, tmpDir, logFD.get(), child);
{
auto activeStepState(activeStep->state_.lock());
if (activeStepState->cancelled) throw Error("step cancelled");
activeStepState->pid = child->sshPid;
activeStepState->pid = child.pid;
}
Finally clearPid([&]() {
@@ -470,33 +222,34 @@ void State::buildRemote(ref<Store> destStore,
process. Meh. */
});
::Machine::Connection conn {
{
.to = child->in.get(),
.from = child->out.get(),
/* Handshake. */
.remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
},
/*.machine =*/ machine,
};
FdSource from(child.from.get());
FdSink to(child.to.get());
Finally updateStats([&]() {
bytesReceived += conn.from.read;
bytesSent += conn.to.written;
bytesReceived += from.read;
bytesSent += to.written;
});
constexpr ServeProto::Version our_version = 0x206;
/* Handshake. */
unsigned int remoteVersion;
try {
conn.remoteVersion = decltype(conn)::handshake(
conn.to,
conn.from,
our_version,
machine->storeUri.render());
to << SERVE_MAGIC_1 << 0x206;
to.flush();
unsigned int magic = readInt(from);
if (magic != SERVE_MAGIC_2)
throw Error("protocol mismatch with nix-store --serve on %1%", machine->sshName);
remoteVersion = readInt(from);
if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
throw Error("unsupported nix-store --serve protocol version on %1%", machine->sshName);
if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0)
throw Error("machine %1% does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName);
} catch (EndOfFile & e) {
child->sshPid.wait();
child.pid.wait();
std::string s = chomp(readFile(result.logFile));
throw Error("cannot connect to %1%: %2%", machine->storeUri.render(), s);
throw Error("cannot connect to %1%: %2%", machine->sshName, s);
}
{
@@ -510,12 +263,62 @@ void State::buildRemote(ref<Store> destStore,
copy the immediate sources of the derivation and the required
outputs of the input derivations. */
updateStep(ssSendingInputs);
BasicDerivation resolvedDrv = build_remote::sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo);
logFileDel.cancel();
StorePathSet inputs;
BasicDerivation basicDrv(*step->drv);
for (auto & p : step->drv->inputSrcs)
inputs.insert(p);
for (auto & input : step->drv->inputDrvs) {
auto drv2 = localStore->readDerivation(input.first);
for (auto & name : input.second) {
if (auto i = get(drv2.outputs, name)) {
auto outPath = i->path(*localStore, drv2.name, name);
inputs.insert(*outPath);
basicDrv.inputSrcs.insert(*outPath);
}
}
}
/* Ensure that the inputs exist in the destination store. This is
a no-op for regular stores, but for the binary cache store,
this will copy the inputs to the binary cache from the local
store. */
if (localStore != std::shared_ptr<Store>(destStore)) {
copyClosure(*localStore, *destStore,
step->drv->inputSrcs,
NoRepair, NoCheckSigs, NoSubstitute);
}
{
auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
mc1.reset();
MaintainCount<counter> mc2(nrStepsCopyingTo);
printMsg(lvlDebug, "sending closure of %s to %s",
localStore->printStorePath(step->drvPath), machine->sshName);
auto now1 = std::chrono::steady_clock::now();
/* Copy the input closure. */
if (machine->isLocalhost()) {
StorePathSet closure;
destStore->computeFSClosure(inputs, closure);
copyPaths(*destStore, *localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
} else {
copyClosureTo(machine->state->sendLock, *destStore, from, to, inputs, true);
}
auto now2 = std::chrono::steady_clock::now();
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
}
autoDelete.cancel();
/* Truncate the log to get rid of messages about substitutions
etc. on the remote system. */
etc. on the remote system. */
if (lseek(logFD.get(), 0, SEEK_SET) != 0)
throw SysError("seeking to the start of log file %s", result.logFile);
@@ -527,21 +330,89 @@ void State::buildRemote(ref<Store> destStore,
/* Do the build. */
printMsg(lvlDebug, "building %s on %s",
localStore->printStorePath(step->drvPath),
machine->storeUri.render());
machine->sshName);
updateStep(ssBuilding);
auto buildResult = build_remote::performBuild(
conn,
*localStore,
step->drvPath,
resolvedDrv,
buildOptions,
nrStepsBuilding
);
to << cmdBuildDerivation << localStore->printStorePath(step->drvPath);
writeDerivation(to, *localStore, basicDrv);
to << maxSilentTime << buildTimeout;
if (GET_PROTOCOL_MINOR(remoteVersion) >= 2)
to << maxLogSize;
if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
to << repeats // == build-repeat
<< step->isDeterministic; // == enforce-determinism
}
to.flush();
result.updateWithBuildResult(buildResult);
result.startTime = time(0);
int res;
{
MaintainCount<counter> mc(nrStepsBuilding);
res = readInt(from);
}
result.stopTime = time(0);
result.errorMsg = readString(from);
if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
result.timesBuilt = readInt(from);
result.isNonDeterministic = readInt(from);
auto start = readInt(from);
auto stop = readInt(from);
if (start && stop) {
/* Note: this represents the duration of a single
round, rather than all rounds. */
result.startTime = start;
result.stopTime = stop;
}
}
if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) {
worker_proto::read(*localStore, from, Phantom<DrvOutputs> {});
}
switch ((BuildResult::Status) res) {
case BuildResult::Built:
result.stepStatus = bsSuccess;
break;
case BuildResult::Substituted:
case BuildResult::AlreadyValid:
result.stepStatus = bsSuccess;
result.isCached = true;
break;
case BuildResult::PermanentFailure:
result.stepStatus = bsFailed;
result.canCache = true;
result.errorMsg = "";
break;
case BuildResult::InputRejected:
case BuildResult::OutputRejected:
result.stepStatus = bsFailed;
result.canCache = true;
break;
case BuildResult::TransientFailure:
result.stepStatus = bsFailed;
result.canRetry = true;
result.errorMsg = "";
break;
case BuildResult::TimedOut:
result.stepStatus = bsTimedOut;
result.errorMsg = "";
break;
case BuildResult::MiscFailure:
result.stepStatus = bsAborted;
result.canRetry = true;
break;
case BuildResult::LogLimitExceeded:
result.stepStatus = bsLogLimitExceeded;
break;
case BuildResult::NotDeterministic:
result.stepStatus = bsNotDeterministic;
result.canRetry = false;
result.canCache = true;
break;
default:
result.stepStatus = bsAborted;
break;
}
if (result.stepStatus != bsSuccess) return;
result.errorMsg = "";
@@ -550,33 +421,11 @@ void State::buildRemote(ref<Store> destStore,
get a build log. */
if (result.isCached) {
printMsg(lvlInfo, "outputs of %s substituted or already valid on %s",
localStore->printStorePath(step->drvPath), machine->storeUri.render());
localStore->printStorePath(step->drvPath), machine->sshName);
unlink(result.logFile.c_str());
result.logFile = "";
}
/* Throttle CPU-bound work. Opportunistically skip updating the current
* step, since this requires a DB roundtrip. */
if (!localWorkThrottler.try_acquire()) {
MaintainCount<counter> mc(nrStepsWaitingForDownloadSlot);
updateStep(ssWaitingForLocalSlot);
localWorkThrottler.acquire();
}
SemaphoreReleaser releaser(&localWorkThrottler);
/* Once we've started copying outputs, release the machine reservation
* so further builds can happen. We do not release the machine earlier
* to avoid situations where the queue runner is bottlenecked on
* copying outputs and we end up building too many things that we
* haven't been able to allow copy slots for. */
reservation.reset();
wakeDispatcher();
StorePathSet outputs;
if (auto * successP = buildResult.tryGetSuccess())
for (auto & [_, realisation] : successP->builtOutputs)
outputs.insert(realisation.outPath);
/* Copy the output paths. */
if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
updateStep(ssReceivingOutputs);
@@ -585,10 +434,39 @@ void State::buildRemote(ref<Store> destStore,
auto now1 = std::chrono::steady_clock::now();
auto infos = conn.queryPathInfos(*localStore, outputs);
StorePathSet outputs;
for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
if (i.second.second)
outputs.insert(*i.second.second);
}
/* Get info about each output path. */
std::map<StorePath, ValidPathInfo> infos;
size_t totalNarSize = 0;
for (auto & [_, info] : infos) totalNarSize += info.narSize;
to << cmdQueryPathInfos;
worker_proto::write(*localStore, to, outputs);
to.flush();
while (true) {
auto storePathS = readString(from);
if (storePathS == "") break;
auto deriver = readString(from); // deriver
auto references = worker_proto::read(*localStore, from, Phantom<StorePathSet> {});
readLongLong(from); // download size
auto narSize = readLongLong(from);
auto narHash = Hash::parseAny(readString(from), htSHA256);
auto ca = parseContentAddressOpt(readString(from));
readStrings<StringSet>(from); // sigs
ValidPathInfo info(localStore->parseStorePath(storePathS), narHash);
assert(outputs.count(info.path));
info.references = references;
info.narSize = narSize;
totalNarSize += info.narSize;
info.narHash = narHash;
info.ca = ca;
if (deriver != "")
info.deriver = localStore->parseStorePath(deriver);
infos.insert_or_assign(info.path, info);
}
if (totalNarSize > maxOutputSize) {
result.stepStatus = bsNarSizeLimitExceeded;
@@ -597,34 +475,43 @@ void State::buildRemote(ref<Store> destStore,
/* Copy each path. */
printMsg(lvlDebug, "copying outputs of %s from %s (%d bytes)",
localStore->printStorePath(step->drvPath), machine->storeUri.render(), totalNarSize);
localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);
auto pathsSorted = reverseTopoSortPaths(infos);
for (auto & path : pathsSorted) {
auto & info = infos.find(path)->second;
/* Receive the NAR from the remote and add it to the
destination store. Meanwhile, extract all the info from the
NAR that getBuildOutput() needs. */
auto source2 = sinkToSource([&](Sink & sink)
{
/* Note: we should only send the command to dump the store
path to the remote if the NAR is actually going to get read
by the destination store, which won't happen if this path
is already valid on the destination store. Since this
lambda function only gets executed if someone tries to read
from source2, we will send the command from here rather
than outside the lambda. */
to << cmdDumpStorePath << localStore->printStorePath(path);
to.flush();
TeeSource tee(from, sink);
extractNarData(tee, localStore->printStorePath(path), narMembers);
});
destStore->addToStore(info, *source2, NoRepair, NoCheckSigs);
}
build_remote::copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos);
auto now2 = std::chrono::steady_clock::now();
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
}
/* Register the outputs of the newly built drv */
if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
auto outputHashes = staticOutputHashes(*localStore, *step->drv);
if (auto * successP = buildResult.tryGetSuccess()) {
for (auto & [outputName, realisation] : successP->builtOutputs) {
// Register the resolved drv output
destStore->registerDrvOutput(realisation);
// Also register the unresolved one
auto unresolvedRealisation = realisation;
unresolvedRealisation.signatures.clear();
unresolvedRealisation.id.drvHash = outputHashes.at(outputName);
destStore->registerDrvOutput(unresolvedRealisation);
}
}
}
/* Shut down the connection. */
child->in = -1;
child->sshPid.wait();
child.to = -1;
child.pid.wait();
} catch (Error & e) {
/* Disable this machine until a certain period of time has
@@ -638,7 +525,7 @@ void State::buildRemote(ref<Store> destStore,
info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4);
info->lastFailure = now;
int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30);
printMsg(lvlInfo, "will disable machine %1% for %2%s", machine->storeUri.render(), delta);
printMsg(lvlInfo, "will disable machine %1% for %2%s", machine->sshName, delta);
info->disabledUntil = now + std::chrono::seconds(delta);
}
throw;

View File

@@ -1,7 +1,7 @@
#include "hydra-build-result.hh"
#include <nix/store/store-api.hh>
#include <nix/util/util.hh>
#include <nix/util/source-accessor.hh>
#include "store-api.hh"
#include "util.hh"
#include "fs-accessor.hh"
#include <regex>
@@ -11,18 +11,18 @@ using namespace nix;
BuildOutput getBuildOutput(
nix::ref<Store> store,
NarMemberDatas & narMembers,
const OutputPathMap derivationOutputs)
const Derivation & drv)
{
BuildOutput res;
/* Compute the closure size. */
StorePathSet outputs;
StorePathSet closure;
for (auto& [outputName, outputPath] : derivationOutputs) {
store->computeFSClosure(outputPath, closure);
outputs.insert(outputPath);
res.outputs.insert({outputName, outputPath});
}
for (auto & i : drv.outputsAndOptPaths(*store))
if (i.second.second) {
store->computeFSClosure(*i.second.second, closure);
outputs.insert(*i.second.second);
}
for (auto & path : closure) {
auto info = store->queryPathInfo(path);
res.closureSize += info->narSize;
@@ -51,8 +51,8 @@ BuildOutput getBuildOutput(
"[[:space:]]+"
"([a-zA-Z0-9_-]+)" // subtype (e.g. "readme")
"[[:space:]]+"
"(\"[^\"]+\"|[^[:space:]<>\"]+)" // path (may be quoted)
"([[:space:]]+([^[:space:]<>]+))?" // entry point
"(\"[^\"]+\"|[^[:space:]\"]+)" // path (may be quoted)
"([[:space:]]+([^[:space:]]+))?" // entry point
, std::regex::extended);
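For reference, each line of nix-support/hydra-build-products has the form "type subtype path [entry-point]", where the path may be double-quoted to allow spaces; the new pattern above additionally rejects <, > and stray quotes inside unquoted paths. A small standalone check of what the pattern accepts (store paths are hypothetical):

#include <iostream>
#include <regex>
#include <string>

int main()
{
    // Same pattern as above, tried outside Hydra.
    std::regex productLine(
        "([a-zA-Z0-9_-]+)[[:space:]]+([a-zA-Z0-9_-]+)[[:space:]]+"
        "(\"[^\"]+\"|[^[:space:]<>\"]+)([[:space:]]+([^[:space:]<>]+))?",
        std::regex::extended);

    // Hypothetical nix-support/hydra-build-products entries.
    for (std::string line : {
            "doc manual /nix/store/aaaa-doc/share/doc/manual",
            "file pdf \"/nix/store/aaaa-doc/a manual.pdf\"",
            "doc manual /nix/store/aaaa-doc/manual index.html" })
    {
        std::smatch m;
        if (std::regex_match(line, m, productLine))
            std::cout << "type=" << m[1] << " subtype=" << m[2]
                      << " path=" << m[3] << " entry=" << m[5] << "\n";
    }
}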
for (auto & output : outputs) {
@@ -63,7 +63,7 @@ BuildOutput getBuildOutput(
auto productsFile = narMembers.find(outputS + "/nix-support/hydra-build-products");
if (productsFile == narMembers.end() ||
productsFile->second.type != SourceAccessor::Type::tRegular)
productsFile->second.type != FSAccessor::Type::tRegular)
continue;
assert(productsFile->second.contents);
@@ -78,7 +78,7 @@ BuildOutput getBuildOutput(
product.type = match[1];
product.subtype = match[2];
std::string s(match[3]);
product.path = s[0] == '"' && s.back() == '"' ? std::string(s, 1, s.size() - 2) : s;
product.path = s[0] == '"' ? std::string(s, 1, s.size() - 2) : s;
product.defaultPath = match[5];
/* Ensure that the path exists and points into the Nix
@@ -93,10 +93,8 @@ BuildOutput getBuildOutput(
if (file == narMembers.end()) continue;
product.name = product.path == store->printStorePath(output) ? "" : baseNameOf(product.path);
if (!std::regex_match(product.name, std::regex("[a-zA-Z0-9.@:_ -]*")))
product.name = "";
if (file->second.type == SourceAccessor::Type::tRegular) {
if (file->second.type == FSAccessor::Type::tRegular) {
product.isRegular = true;
product.fileSize = file->second.fileSize.value();
product.sha256hash = file->second.sha256.value();
@@ -109,16 +107,17 @@ BuildOutput getBuildOutput(
/* If no build products were explicitly declared, then add all
outputs as a product of type "nix-build". */
if (!explicitProducts) {
for (auto & [name, output] : derivationOutputs) {
for (auto & [name, output] : drv.outputs) {
BuildProduct product;
product.path = store->printStorePath(output);
auto outPath = output.path(*store, drv.name, name);
product.path = store->printStorePath(*outPath);
product.type = "nix-build";
product.subtype = name == "out" ? "" : name;
product.name = output.name();
product.name = outPath->name();
auto file = narMembers.find(product.path);
assert(file != narMembers.end());
if (file->second.type == SourceAccessor::Type::tDirectory)
if (file->second.type == FSAccessor::Type::tDirectory)
res.products.push_back(product);
}
}
@@ -127,34 +126,25 @@ BuildOutput getBuildOutput(
for (auto & output : outputs) {
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-release-name");
if (file == narMembers.end() ||
file->second.type != SourceAccessor::Type::tRegular)
file->second.type != FSAccessor::Type::tRegular)
continue;
auto contents = trim(file->second.contents.value());
if (std::regex_match(contents, std::regex("[a-zA-Z0-9.@:_-]+")))
res.releaseName = contents;
res.releaseName = trim(file->second.contents.value());
// FIXME: validate release name
}
/* Get metrics. */
for (auto & output : outputs) {
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-metrics");
if (file == narMembers.end() ||
file->second.type != SourceAccessor::Type::tRegular)
file->second.type != FSAccessor::Type::tRegular)
continue;
for (auto & line : tokenizeString<Strings>(file->second.contents.value(), "\n")) {
auto fields = tokenizeString<std::vector<std::string>>(line);
if (fields.size() < 2) continue;
if (!std::regex_match(fields[0], std::regex("[a-zA-Z0-9._-]+")))
continue;
BuildMetric metric;
metric.name = fields[0];
try {
metric.value = std::stod(fields[1]);
} catch (...) {
continue; // skip this metric
}
metric.name = fields[0]; // FIXME: validate
metric.value = atof(fields[1].c_str()); // FIXME
metric.unit = fields.size() >= 3 ? fields[2] : "";
if (!std::regex_match(metric.unit, std::regex("[a-zA-Z0-9._%-]+")))
metric.unit = "";
res.metrics[metric.name] = metric;
}
}
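A nix-support/hydra-metrics file is a list of whitespace-separated "name value [unit]" lines; with the validation added above, metrics with malformed names or non-numeric values are skipped, and suspicious units are dropped rather than written to the database verbatim. A hypothetical example:

    compileTime 124.67 s
    binarySize 8123456 bytes
    coverage 87.5 %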

View File

@@ -2,8 +2,8 @@
#include "state.hh"
#include "hydra-build-result.hh"
#include <nix/util/finally.hh>
#include <nix/store/binary-cache-store.hh>
#include "finally.hh"
#include "binary-cache-store.hh"
using namespace nix;
@@ -16,7 +16,7 @@ void setThreadName(const std::string & name)
}
void State::builder(std::unique_ptr<MachineReservation> reservation)
void State::builder(MachineReservation::ptr reservation)
{
setThreadName("bld~" + std::string(reservation->step->drvPath.to_string()));
@@ -35,20 +35,22 @@ void State::builder(std::unique_ptr<MachineReservation> reservation)
activeSteps_.lock()->erase(activeStep);
});
std::string machine = reservation->machine->storeUri.render();
try {
auto destStore = getDestStore();
// Might release the reservation.
res = doBuildStep(destStore, std::move(reservation), activeStep);
res = doBuildStep(destStore, reservation, activeStep);
} catch (std::exception & e) {
printMsg(lvlError, "uncaught exception building %s on %s: %s",
localStore->printStorePath(activeStep->step->drvPath),
machine,
localStore->printStorePath(reservation->step->drvPath),
reservation->machine->sshName,
e.what());
}
}
/* Release the machine and wake up the dispatcher. */
assert(reservation.unique());
reservation = 0;
wakeDispatcher();
/* If there was a temporary failure, retry the step after an
exponentially increasing interval. */
Step::ptr step = wstep.lock();
@@ -70,11 +72,11 @@ void State::builder(std::unique_ptr<MachineReservation> reservation)
State::StepResult State::doBuildStep(nix::ref<Store> destStore,
std::unique_ptr<MachineReservation> reservation,
MachineReservation::ptr reservation,
std::shared_ptr<ActiveStep> activeStep)
{
auto step(reservation->step);
auto machine(reservation->machine);
auto & step(reservation->step);
auto & machine(reservation->machine);
{
auto step_(step->state.lock());
@@ -96,13 +98,8 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
it). */
BuildID buildId;
std::optional<StorePath> buildDrvPath;
// Other fields set below
nix::ServeProto::BuildOptions buildOptions {
.maxLogSize = maxLogSize,
.nrRepeats = step->isDeterministic ? 1u : 0u,
.enforceDeterminism = step->isDeterministic,
.keepFailed = false,
};
unsigned int maxSilentTime, buildTimeout;
unsigned int repeats = step->isDeterministic ? 1 : 0;
auto conn(dbPool.get());
@@ -137,18 +134,18 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
{
auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName));
if (i != jobsetRepeats.end())
buildOptions.nrRepeats = std::max(buildOptions.nrRepeats, i->second);
repeats = std::max(repeats, i->second);
}
}
if (!build) build = *dependents.begin();
buildId = build->id;
buildDrvPath = build->drvPath;
buildOptions.maxSilentTime = build->maxSilentTime;
buildOptions.buildTimeout = build->buildTimeout;
maxSilentTime = build->maxSilentTime;
buildTimeout = build->buildTimeout;
printInfo("performing step %s %d times on %s (needed by build %d and %d others)",
localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->storeUri.render(), buildId, (dependents.size() - 1));
localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1));
}
if (!buildOneDone)
@@ -176,7 +173,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
unlink(result.logFile.c_str());
}
} catch (...) {
ignoreExceptionInDestructor();
ignoreException();
}
}
});
@@ -194,7 +191,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
{
auto mc = startDbUpdate();
pqxx::work txn(*conn);
stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->storeUri.render(), bsBusy);
stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->sshName, bsBusy);
txn.commit();
}
@@ -209,7 +206,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
try {
/* FIXME: referring builds may have conflicting timeouts. */
buildRemote(destStore, std::move(reservation), machine, step, buildOptions, result, activeStep, updateStep, narMembers);
buildRemote(destStore, machine, step, maxSilentTime, buildTimeout, repeats, result, activeStep, updateStep, narMembers);
} catch (Error & e) {
if (activeStep->state_.lock()->cancelled) {
printInfo("marking step %d of build %d as cancelled", stepNr, buildId);
@@ -224,7 +221,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
if (result.stepStatus == bsSuccess) {
updateStep(ssPostProcessing);
res = getBuildOutput(destStore, narMembers, destStore->queryDerivationOutputMap(step->drvPath, &*localStore));
res = getBuildOutput(destStore, narMembers, *step->drv);
}
}
@@ -251,7 +248,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
/* Finish the step in the database. */
if (stepNr) {
pqxx::work txn(*conn);
finishBuildStep(txn, result, buildId, stepNr, machine->storeUri.render());
finishBuildStep(txn, result, buildId, stepNr, machine->sshName);
txn.commit();
}
@@ -259,7 +256,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
issue). Retry a number of times. */
if (result.canRetry) {
printMsg(lvlError, "possibly transient failure building %s on %s: %s",
localStore->printStorePath(step->drvPath), machine->storeUri.render(), result.errorMsg);
localStore->printStorePath(step->drvPath), machine->sshName, result.errorMsg);
assert(stepNr);
bool retry;
{
@@ -278,12 +275,9 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
assert(stepNr);
for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(step->drvPath, &*localStore)) {
if (!optOutputPath)
throw Error(
"Missing output %s for derivation %d which was supposed to have succeeded",
outputName, localStore->printStorePath(step->drvPath));
addRoot(*optOutputPath);
for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
if (i.second.second)
addRoot(*i.second.second);
}
/* Register success in the database for all Build objects that
@@ -329,7 +323,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
pqxx::work txn(*conn);
for (auto & b : direct) {
printInfo("marking build %1% as succeeded", b->id);
printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id);
markSucceededBuild(txn, b, res, buildId != b->id || result.isCached,
result.startTime, result.stopTime);
}
@@ -404,7 +398,7 @@ void State::failStep(
Step::ptr step,
BuildID buildId,
const RemoteResult & result,
::Machine::ptr machine,
Machine::ptr machine,
bool & stepFinished)
{
/* Register failure in the database for all Build objects that
@@ -450,20 +444,21 @@ void State::failStep(
build->finishedInDB)
continue;
createBuildStep(txn,
0, build->id, step, machine ? machine->storeUri.render() : "",
0, build->id, step, machine ? machine->sshName : "",
result.stepStatus, result.errorMsg, buildId == build->id ? 0 : buildId);
}
/* Mark all builds that depend on this derivation as failed. */
for (auto & build : indirect) {
if (build->finishedInDB) continue;
printError("marking build %1% as failed", build->id);
txn.exec("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
pqxx::params{build->id,
printMsg(lvlError, format("marking build %1% as failed") % build->id);
txn.exec_params0
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
build->id,
(int) (build->drvPath != step->drvPath && result.buildStatus() == bsFailed ? bsDepFailed : result.buildStatus()),
result.startTime,
result.stopTime,
result.stepStatus == bsCachedFailure ? 1 : 0}).no_rows();
result.stepStatus == bsCachedFailure ? 1 : 0);
nrBuildsDone++;
}
@@ -472,7 +467,7 @@ void State::failStep(
if (result.stepStatus != bsCachedFailure && result.canCache)
for (auto & i : step->drv->outputsAndOptPaths(*localStore))
if (i.second.second)
txn.exec("insert into FailedPaths values ($1)", pqxx::params{localStore->printStorePath(*i.second.second)}).no_rows();
txn.exec_params0("insert into FailedPaths values ($1)", localStore->printStorePath(*i.second.second));
txn.commit();
}

View File

@@ -2,7 +2,6 @@
#include <cmath>
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include "state.hh"
@@ -40,34 +39,28 @@ void State::dispatcher()
printMsg(lvlDebug, "dispatcher woken up");
nrDispatcherWakeups++;
auto t_before_work = std::chrono::steady_clock::now();
auto now1 = std::chrono::steady_clock::now();
auto sleepUntil = doDispatch();
auto t_after_work = std::chrono::steady_clock::now();
auto now2 = std::chrono::steady_clock::now();
prom.dispatcher_time_spent_running.Increment(
std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(t_after_work - t_before_work).count();
dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
/* Sleep until we're woken up (either because a runnable build
is added, or because a build finishes). */
{
auto dispatcherWakeup_(dispatcherWakeup.lock());
if (!*dispatcherWakeup_) {
debug("dispatcher sleeping for %1%s",
printMsg(lvlDebug, format("dispatcher sleeping for %1%s") %
std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count());
dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil);
}
*dispatcherWakeup_ = false;
}
auto t_after_sleep = std::chrono::steady_clock::now();
prom.dispatcher_time_spent_waiting.Increment(
std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
} catch (std::exception & e) {
printError("dispatcher: %s", e.what());
printMsg(lvlError, format("dispatcher: %1%") % e.what());
sleep(1);
}
@@ -87,124 +80,17 @@ system_time State::doDispatch()
jobset.second->pruneSteps();
auto s2 = jobset.second->shareUsed();
if (s1 != s2)
debug("pruned scheduling window of %1%:%2% from %3% to %4%",
jobset.first.first, jobset.first.second, s1, s2);
printMsg(lvlDebug, format("pruned scheduling window of %1%:%2% from %3% to %4%")
% jobset.first.first % jobset.first.second % s1 % s2);
}
}
system_time now = std::chrono::system_clock::now();
/* Start steps until we're out of steps or slots. */
auto sleepUntil = system_time::max();
bool keepGoing;
/* Sort the runnable steps by priority. Priority is established
as follows (in order of precedence):
- The global priority of the builds that depend on the
step. This allows admins to bump a build to the front of
the queue.
- The lowest used scheduling share of the jobsets depending
on the step.
- The local priority of the build, as set via the build's
meta.schedulingPriority field. Note that this is not
quite correct: the local priority should only be used to
establish priority between builds in the same jobset, but
here it's used between steps in different jobsets if they
happen to have the same lowest used scheduling share. But
that's not very likely.
- The lowest ID of the builds depending on the step;
i.e. older builds take priority over new ones.
FIXME: O(n lg n); obviously, it would be better to keep a
runnable queue sorted by priority. */
struct StepInfo
{
Step::ptr step;
bool alreadyScheduled = false;
/* The lowest share used of any jobset depending on this
step. */
double lowestShareUsed = 1e9;
/* Info copied from step->state to ensure that the
comparator is a partial ordering (see MachineInfo). */
int highestGlobalPriority;
int highestLocalPriority;
size_t numRequiredSystemFeatures;
size_t numRevDeps;
BuildID lowestBuildID;
StepInfo(Step::ptr step, Step::State & step_) : step(step)
{
for (auto & jobset : step_.jobsets)
lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
highestGlobalPriority = step_.highestGlobalPriority;
highestLocalPriority = step_.highestLocalPriority;
numRequiredSystemFeatures = step->requiredSystemFeatures.size();
numRevDeps = step_.rdeps.size();
lowestBuildID = step_.lowestBuildID;
}
};
std::vector<StepInfo> runnableSorted;
struct RunnablePerType
{
unsigned int count{0};
std::chrono::seconds waitTime{0};
};
std::unordered_map<std::string, RunnablePerType> runnablePerType;
{
auto runnable_(runnable.lock());
runnableSorted.reserve(runnable_->size());
for (auto i = runnable_->begin(); i != runnable_->end(); ) {
auto step = i->lock();
/* Remove dead steps. */
if (!step) {
i = runnable_->erase(i);
continue;
}
++i;
auto & r = runnablePerType[step->systemType];
r.count++;
/* Skip previously failed steps that aren't ready
to be retried. */
auto step_(step->state.lock());
r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
if (step_->tries > 0 && step_->after > now) {
if (step_->after < sleepUntil)
sleepUntil = step_->after;
continue;
}
runnableSorted.emplace_back(step, *step_);
}
}
sort(runnableSorted.begin(), runnableSorted.end(),
[](const StepInfo & a, const StepInfo & b)
{
return
a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
a.numRequiredSystemFeatures != b.numRequiredSystemFeatures ? a.numRequiredSystemFeatures > b.numRequiredSystemFeatures :
a.numRevDeps != b.numRevDeps ? a.numRevDeps > b.numRevDeps :
a.lowestBuildID < b.lowestBuildID;
});
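The chained conditionals above implement a lexicographic comparison with mixed directions (descending global priority, ascending share used, descending local priority, and so on, ending with ascending build ID). An equivalent formulation with tuples (tie-breakers abbreviated, field names illustrative) makes the required strict weak ordering easier to verify:

#include <algorithm>
#include <tuple>
#include <vector>

struct Info
{
    int highestGlobalPriority;
    double lowestShareUsed;
    int highestLocalPriority;
    unsigned lowestBuildID;
};

// Negating the descending keys lets one ascending tuple
// comparison do all the tie-breaking.
bool higherPriority(const Info & a, const Info & b)
{
    return std::tuple(-a.highestGlobalPriority, a.lowestShareUsed,
                      -a.highestLocalPriority, a.lowestBuildID)
         < std::tuple(-b.highestGlobalPriority, b.lowestShareUsed,
                      -b.highestLocalPriority, b.lowestBuildID);
}

int main()
{
    std::vector<Info> v{{0, 0.5, 10, 7}, {1, 0.9, 0, 9}, {0, 0.5, 10, 3}};
    std::sort(v.begin(), v.end(), higherPriority);
    // Result: the globally bumped step first, then equal steps by build ID.
}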
do {
now = std::chrono::system_clock::now();
system_time now = std::chrono::system_clock::now();
/* Copy the currentJobs field of each machine. This is
necessary to ensure that the sort comparator below is
@@ -212,7 +98,7 @@ system_time State::doDispatch()
filter out temporarily disabled machines. */
struct MachineInfo
{
::Machine::ptr machine;
Machine::ptr machine;
unsigned long currentJobs;
};
std::vector<MachineInfo> machinesSorted;
@@ -252,6 +138,104 @@ system_time State::doDispatch()
a.currentJobs > b.currentJobs;
});
/* Sort the runnable steps by priority. Priority is established
as follows (in order of precedence):
- The global priority of the builds that depend on the
step. This allows admins to bump a build to the front of
the queue.
- The lowest used scheduling share of the jobsets depending
on the step.
- The local priority of the build, as set via the build's
meta.schedulingPriority field. Note that this is not
quite correct: the local priority should only be used to
establish priority between builds in the same jobset, but
here it's used between steps in different jobsets if they
happen to have the same lowest used scheduling share. But
that's not very likely.
- The lowest ID of the builds depending on the step;
i.e. older builds take priority over new ones.
FIXME: O(n lg n); obviously, it would be better to keep a
runnable queue sorted by priority. */
struct StepInfo
{
Step::ptr step;
/* The lowest share used of any jobset depending on this
step. */
double lowestShareUsed = 1e9;
/* Info copied from step->state to ensure that the
comparator is a partial ordering (see MachineInfo). */
int highestGlobalPriority;
int highestLocalPriority;
BuildID lowestBuildID;
StepInfo(Step::ptr step, Step::State & step_) : step(step)
{
for (auto & jobset : step_.jobsets)
lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
highestGlobalPriority = step_.highestGlobalPriority;
highestLocalPriority = step_.highestLocalPriority;
lowestBuildID = step_.lowestBuildID;
}
};
std::vector<StepInfo> runnableSorted;
struct RunnablePerType
{
unsigned int count{0};
std::chrono::seconds waitTime{0};
};
std::unordered_map<std::string, RunnablePerType> runnablePerType;
{
auto runnable_(runnable.lock());
runnableSorted.reserve(runnable_->size());
for (auto i = runnable_->begin(); i != runnable_->end(); ) {
auto step = i->lock();
/* Remove dead steps. */
if (!step) {
i = runnable_->erase(i);
continue;
}
++i;
auto & r = runnablePerType[step->systemType];
r.count++;
/* Skip previously failed steps that aren't ready
to be retried. */
auto step_(step->state.lock());
r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
if (step_->tries > 0 && step_->after > now) {
if (step_->after < sleepUntil)
sleepUntil = step_->after;
continue;
}
runnableSorted.emplace_back(step, *step_);
}
}
sort(runnableSorted.begin(), runnableSorted.end(),
[](const StepInfo & a, const StepInfo & b)
{
return
a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
a.lowestBuildID < b.lowestBuildID;
});
/* Find a machine with a free slot and find a step to run
on it. Once we find such a pair, we restart the outer
loop because the machine sorting will have changed. */
@@ -261,14 +245,12 @@ system_time State::doDispatch()
if (mi.machine->state->currentJobs >= mi.machine->maxJobs) continue;
for (auto & stepInfo : runnableSorted) {
if (stepInfo.alreadyScheduled) continue;
auto & step(stepInfo.step);
/* Can this machine do this step? */
if (!mi.machine->supportsStep(step)) {
debug("machine '%s' does not support step '%s' (system type '%s')",
mi.machine->storeUri.render(), localStore->printStorePath(step->drvPath), step->drv->platform);
mi.machine->sshName, localStore->printStorePath(step->drvPath), step->drv->platform);
continue;
}
@@ -289,12 +271,10 @@ system_time State::doDispatch()
r.count--;
}
stepInfo.alreadyScheduled = true;
/* Make a slot reservation and start a thread to
do the build. */
auto builderThread = std::thread(&State::builder, this,
std::make_unique<MachineReservation>(*this, step, mi.machine));
std::make_shared<MachineReservation>(*this, step, mi.machine));
builderThread.detach(); // FIXME?
keepGoing = true;
@@ -448,7 +428,7 @@ void Jobset::pruneSteps()
}
State::MachineReservation::MachineReservation(State & state, Step::ptr step, ::Machine::ptr machine)
State::MachineReservation::MachineReservation(State & state, Step::ptr step, Machine::ptr machine)
: state(state), step(step), machine(machine)
{
machine->state->currentJobs++;

View File

@@ -2,9 +2,9 @@
#include <memory>
#include <nix/util/hash.hh>
#include <nix/store/derivations.hh>
#include <nix/store/store-api.hh>
#include "hash.hh"
#include "derivations.hh"
#include "store-api.hh"
#include "nar-extractor.hh"
struct BuildProduct
@@ -36,12 +36,10 @@ struct BuildOutput
std::list<BuildProduct> products;
std::map<std::string, nix::StorePath> outputs;
std::map<std::string, BuildMetric> metrics;
};
BuildOutput getBuildOutput(
nix::ref<nix::Store> store,
NarMemberDatas & narMembers,
const nix::OutputPathMap derivationOutputs);
const nix::Derivation & drv);

View File

@@ -1,7 +1,6 @@
#include <iostream>
#include <thread>
#include <optional>
#include <type_traits>
#include <sys/types.h>
#include <sys/stat.h>
@@ -9,21 +8,27 @@
#include <prometheus/exposer.h>
#include <nlohmann/json.hpp>
#include <nix/util/signals.hh>
#include "state.hh"
#include "hydra-build-result.hh"
#include <nix/store/store-open.hh>
#include <nix/store/remote-store.hh>
#include "store-api.hh"
#include "remote-store.hh"
#include <nix/store/globals.hh>
#include "globals.hh"
#include "hydra-config.hh"
#include <nix/store/s3-binary-cache-store.hh>
#include <nix/main/shared.hh>
#include "json.hh"
#include "s3-binary-cache-store.hh"
#include "shared.hh"
using namespace nix;
using nlohmann::json;
namespace nix {
template<> void toJSON<std::atomic<long>>(std::ostream & str, const std::atomic<long> & n) { str << n; }
template<> void toJSON<std::atomic<uint64_t>>(std::ostream & str, const std::atomic<uint64_t> & n) { str << n; }
template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
}
std::string getEnvOrDie(const std::string & key)
@@ -70,31 +75,10 @@ State::PromMetrics::PromMetrics()
.Register(*registry)
.Add({})
)
, dispatcher_time_spent_running(
prometheus::BuildCounter()
.Name("hydraqueuerunner_dispatcher_time_spent_running")
.Help("Time (in micros) spent running the dispatcher")
.Register(*registry)
.Add({})
)
, dispatcher_time_spent_waiting(
prometheus::BuildCounter()
.Name("hydraqueuerunner_dispatcher_time_spent_waiting")
.Help("Time (in micros) spent waiting for the dispatcher to obtain work")
.Register(*registry)
.Add({})
)
, queue_monitor_time_spent_running(
prometheus::BuildCounter()
.Name("hydraqueuerunner_queue_monitor_time_spent_running")
.Help("Time (in micros) spent running the queue monitor")
.Register(*registry)
.Add({})
)
, queue_monitor_time_spent_waiting(
prometheus::BuildCounter()
.Name("hydraqueuerunner_queue_monitor_time_spent_waiting")
.Help("Time (in micros) spent waiting for the queue monitor to obtain work")
, queue_max_id(
prometheus::BuildGauge()
.Name("hydraqueuerunner_queue_max_build_id_info")
.Help("Maximum build record ID in the queue")
.Register(*registry)
.Add({})
)
@@ -106,7 +90,6 @@ State::State(std::optional<std::string> metricsAddrOpt)
: config(std::make_unique<HydraConfig>())
, maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
, dbPool(config->getIntOption("max_db_connections", 128))
, localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2)))
, maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
, maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
, uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))
@@ -157,29 +140,50 @@ void State::parseMachines(const std::string & contents)
oldMachines = *machines_;
}
for (auto && machine_ : nix::Machine::parseConfig({}, contents)) {
auto machine = std::make_shared<::Machine>(std::move(machine_));
for (auto line : tokenizeString<Strings>(contents, "\n")) {
line = trim(std::string(line, 0, line.find('#')));
auto tokens = tokenizeString<std::vector<std::string>>(line);
if (tokens.size() < 3) continue;
tokens.resize(8);
auto machine = std::make_shared<Machine>();
machine->sshName = tokens[0];
machine->systemTypes = tokenizeString<StringSet>(tokens[1], ",");
machine->sshKey = tokens[2] == "-" ? std::string("") : tokens[2];
if (tokens[3] != "")
machine->maxJobs = string2Int<decltype(machine->maxJobs)>(tokens[3]).value();
else
machine->maxJobs = 1;
machine->speedFactor = atof(tokens[4].c_str());
if (tokens[5] == "-") tokens[5] = "";
machine->supportedFeatures = tokenizeString<StringSet>(tokens[5], ",");
if (tokens[6] == "-") tokens[6] = "";
machine->mandatoryFeatures = tokenizeString<StringSet>(tokens[6], ",");
for (auto & f : machine->mandatoryFeatures)
machine->supportedFeatures.insert(f);
if (tokens[7] != "" && tokens[7] != "-")
machine->sshPublicHostKey = base64Decode(tokens[7]);
/* Re-use the State object of the previous machine with the
same name. */
auto i = oldMachines.find(machine->storeUri.variant);
auto i = oldMachines.find(machine->sshName);
if (i == oldMachines.end())
printMsg(lvlChatty, "adding new machine %1%", machine->storeUri.render());
printMsg(lvlChatty, format("adding new machine %1%") % machine->sshName);
else
printMsg(lvlChatty, "updating machine %1%", machine->storeUri.render());
printMsg(lvlChatty, format("updating machine %1%") % machine->sshName);
machine->state = i == oldMachines.end()
? std::make_shared<::Machine::State>()
? std::make_shared<Machine::State>()
: i->second->state;
newMachines[machine->storeUri.variant] = machine;
newMachines[machine->sshName] = machine;
}
for (auto & m : oldMachines)
if (newMachines.find(m.first) == newMachines.end()) {
if (m.second->enabled)
printInfo("removing machine %1%", m.second->storeUri.render());
/* Add a disabled ::Machine object to make sure stats are
printMsg(lvlInfo, format("removing machine %1%") % m.first);
/* Add a disabled Machine object to make sure stats are
maintained. */
auto machine = std::make_shared<::Machine>(*(m.second));
auto machine = std::make_shared<Machine>(*(m.second));
machine->enabled = false;
newMachines[m.first] = machine;
}
@@ -207,7 +211,7 @@ void State::monitorMachinesFile()
parseMachines("localhost " +
(settings.thisSystem == "x86_64-linux" ? "x86_64-linux,i686-linux" : settings.thisSystem.get())
+ " - " + std::to_string(settings.maxBuildJobs) + " 1 "
+ concatStringsSep(",", StoreConfig::getDefaultSystemFeatures()));
+ concatStringsSep(",", settings.systemFeatures.get()));
machinesReadyLock.unlock();
return;
}
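The fallback above registers localhost using the same whitespace-separated format that parseMachines reads from the machines file: name, system types, SSH key, max jobs, speed factor, supported features, mandatory features, and a base64 SSH host key, with "-" standing for an empty field and "#" starting a comment. A hypothetical machines file:

    # host     systems                  key                        maxJobs speed features     mandatory hostkey
    localhost  x86_64-linux,i686-linux  -                          8       1     kvm          -         -
    root@arm1  aarch64-linux            /var/lib/hydra/id_ed25519  4       2     big-parallel -         <base64-key>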
@@ -276,16 +280,17 @@ void State::monitorMachinesFile()
void State::clearBusy(Connection & conn, time_t stopTime)
{
pqxx::work txn(conn);
txn.exec("update BuildSteps set busy = 0, status = $1, stopTime = $2 where busy != 0",
pqxx::params{(int) bsAborted,
stopTime != 0 ? std::make_optional(stopTime) : std::nullopt}).no_rows();
txn.exec_params0
("update BuildSteps set busy = 0, status = $1, stopTime = $2 where busy != 0",
(int) bsAborted,
stopTime != 0 ? std::make_optional(stopTime) : std::nullopt);
txn.commit();
}
unsigned int State::allocBuildStep(pqxx::work & txn, BuildID buildId)
{
auto res = txn.exec("select max(stepnr) from BuildSteps where build = $1", buildId).one_row();
auto res = txn.exec_params1("select max(stepnr) from BuildSteps where build = $1", buildId);
return res[0].is_null() ? 1 : res[0].as<int>() + 1;
}
@@ -296,8 +301,9 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID
restart:
auto stepNr = allocBuildStep(txn, buildId);
auto r = txn.exec("insert into BuildSteps (build, stepnr, type, drvPath, busy, startTime, system, status, propagatedFrom, errorMsg, stopTime, machine) values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) on conflict do nothing",
pqxx::params{buildId,
auto r = txn.exec_params
("insert into BuildSteps (build, stepnr, type, drvPath, busy, startTime, system, status, propagatedFrom, errorMsg, stopTime, machine) values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) on conflict do nothing",
buildId,
stepNr,
0, // == build
localStore->printStorePath(step->drvPath),
@@ -308,16 +314,14 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID
propagatedFrom != 0 ? std::make_optional(propagatedFrom) : std::nullopt, // internal::params
errorMsg != "" ? std::make_optional(errorMsg) : std::nullopt,
startTime != 0 && status != bsBusy ? std::make_optional(startTime) : std::nullopt,
machine});
machine);
if (r.affected_rows() == 0) goto restart;
for (auto & [name, output] : getDestStore()->queryPartialDerivationOutputMap(step->drvPath, &*localStore))
txn.exec("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
pqxx::params{buildId, stepNr, name,
output
? std::optional { localStore->printStorePath(*output)}
: std::nullopt}).no_rows();
for (auto & [name, output] : step->drv->outputs)
txn.exec_params0
("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
buildId, stepNr, name, localStore->printStorePath(*output.path(*localStore, step->drv->name, name)));
if (status == bsBusy)
txn.exec(fmt("notify step_started, '%d\t%d'", buildId, stepNr));
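The goto restart above is optimistic concurrency: two writers may compute the same max(stepnr) + 1, but the unique key on (build, stepnr) plus "on conflict do nothing" makes the loser insert zero rows, re-read, and retry. The same pattern in isolation (schema trimmed to the two key columns; purely illustrative):

#include <pqxx/pqxx>

// Allocate the next step number for a build, retrying on races.
unsigned createStep(pqxx::work & txn, int buildId)
{
    for (;;) {
        auto row = txn.exec_params1(
            "select coalesce(max(stepnr), 0) + 1 from BuildSteps where build = $1",
            buildId);
        auto stepNr = row[0].as<unsigned>();
        auto r = txn.exec_params(
            "insert into BuildSteps (build, stepnr) values ($1, $2) on conflict do nothing",
            buildId, stepNr);
        if (r.affected_rows() == 1) return stepNr;
        // Another writer claimed this stepNr first; recompute and retry.
    }
}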
@@ -328,10 +332,11 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID
void State::updateBuildStep(pqxx::work & txn, BuildID buildId, unsigned int stepNr, StepState stepState)
{
if (txn.exec("update BuildSteps set busy = $1 where build = $2 and stepnr = $3 and busy != 0 and status is null",
pqxx::params{(int) stepState,
if (txn.exec_params
("update BuildSteps set busy = $1 where build = $2 and stepnr = $3 and busy != 0 and status is null",
(int) stepState,
buildId,
stepNr}).affected_rows() != 1)
stepNr).affected_rows() != 1)
throw Error("step %d of build %d is in an unexpected state", stepNr, buildId);
}
@@ -341,52 +346,44 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
{
assert(result.startTime);
assert(result.stopTime);
txn.exec("update BuildSteps set busy = 0, status = $1, errorMsg = $4, startTime = $5, stopTime = $6, machine = $7, overhead = $8, timesBuilt = $9, isNonDeterministic = $10 where build = $2 and stepnr = $3",
pqxx::params{(int) result.stepStatus, buildId, stepNr,
txn.exec_params0
("update BuildSteps set busy = 0, status = $1, errorMsg = $4, startTime = $5, stopTime = $6, machine = $7, overhead = $8, timesBuilt = $9, isNonDeterministic = $10 where build = $2 and stepnr = $3",
(int) result.stepStatus, buildId, stepNr,
result.errorMsg != "" ? std::make_optional(result.errorMsg) : std::nullopt,
result.startTime, result.stopTime,
machine != "" ? std::make_optional(machine) : std::nullopt,
result.overhead != 0 ? std::make_optional(result.overhead) : std::nullopt,
result.timesBuilt > 0 ? std::make_optional(result.timesBuilt) : std::nullopt,
result.timesBuilt > 1 ? std::make_optional(result.isNonDeterministic) : std::nullopt}).no_rows();
result.timesBuilt > 1 ? std::make_optional(result.isNonDeterministic) : std::nullopt);
assert(result.logFile.find('\t') == std::string::npos);
txn.exec(fmt("notify step_finished, '%d\t%d\t%s'",
buildId, stepNr, result.logFile));
if (result.stepStatus == bsSuccess) {
// Update the corresponding `BuildStepOutputs` row to add the output path
auto res = txn.exec("select drvPath from BuildSteps where build = $1 and stepnr = $2", pqxx::params{buildId, stepNr}).one_row();
assert(res.size());
StorePath drvPath = localStore->parseStorePath(res[0].as<std::string>());
// If we've finished building, all the paths should be known
for (auto & [name, output] : getDestStore()->queryDerivationOutputMap(drvPath, &*localStore))
txn.exec("update BuildStepOutputs set path = $4 where build = $1 and stepnr = $2 and name = $3",
pqxx::params{buildId, stepNr, name, localStore->printStorePath(output)}).no_rows();
}
}
int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath)
Build::ptr build, const StorePath & drvPath, const std::string & outputName, const StorePath & storePath)
{
restart:
auto stepNr = allocBuildStep(txn, build->id);
auto r = txn.exec("insert into BuildSteps (build, stepnr, type, drvPath, busy, status, startTime, stopTime) values ($1, $2, $3, $4, $5, $6, $7, $8) on conflict do nothing",
pqxx::params{build->id,
auto r = txn.exec_params
("insert into BuildSteps (build, stepnr, type, drvPath, busy, status, startTime, stopTime) values ($1, $2, $3, $4, $5, $6, $7, $8) on conflict do nothing",
build->id,
stepNr,
1, // == substitution
(localStore->printStorePath(drvPath)),
0,
0,
startTime,
stopTime});
stopTime);
if (r.affected_rows() == 0) goto restart;
txn.exec("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
pqxx::params{build->id, stepNr, outputName,
localStore->printStorePath(storePath)}).no_rows();
txn.exec_params0
("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
build->id, stepNr, outputName,
localStore->printStorePath(storePath));
return stepNr;
}
@@ -453,54 +450,49 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
{
if (build->finishedInDB) return;
if (txn.exec("select 1 from Builds where id = $1 and finished = 0", pqxx::params{build->id}).empty()) return;
if (txn.exec_params("select 1 from Builds where id = $1 and finished = 0", build->id).empty()) return;
txn.exec("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, size = $5, closureSize = $6, releaseName = $7, isCachedBuild = $8, notificationPendingSince = $4 where id = $1",
pqxx::params{build->id,
txn.exec_params0
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, size = $5, closureSize = $6, releaseName = $7, isCachedBuild = $8, notificationPendingSince = $4 where id = $1",
build->id,
(int) (res.failed ? bsFailedWithOutput : bsSuccess),
startTime,
stopTime,
res.size,
res.closureSize,
res.releaseName != "" ? std::make_optional(res.releaseName) : std::nullopt,
isCachedBuild ? 1 : 0}).no_rows();
isCachedBuild ? 1 : 0);
for (auto & [outputName, outputPath] : res.outputs) {
txn.exec("update BuildOutputs set path = $3 where build = $1 and name = $2",
pqxx::params{build->id,
outputName,
localStore->printStorePath(outputPath)}
).no_rows();
}
txn.exec("delete from BuildProducts where build = $1", pqxx::params{build->id}).no_rows();
txn.exec_params0("delete from BuildProducts where build = $1", build->id);
unsigned int productNr = 1;
for (auto & product : res.products) {
txn.exec("insert into BuildProducts (build, productnr, type, subtype, fileSize, sha256hash, path, name, defaultPath) values ($1, $2, $3, $4, $5, $6, $7, $8, $9)",
pqxx::params{build->id,
txn.exec_params0
("insert into BuildProducts (build, productnr, type, subtype, fileSize, sha256hash, path, name, defaultPath) values ($1, $2, $3, $4, $5, $6, $7, $8, $9)",
build->id,
productNr++,
product.type,
product.subtype,
product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt,
product.sha256hash ? std::make_optional(product.sha256hash->to_string(HashFormat::Base16, false)) : std::nullopt,
product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base16, false)) : std::nullopt,
product.path,
product.name,
product.defaultPath}).no_rows();
product.defaultPath);
}
txn.exec("delete from BuildMetrics where build = $1", pqxx::params{build->id}).no_rows();
txn.exec_params0("delete from BuildMetrics where build = $1", build->id);
for (auto & metric : res.metrics) {
txn.exec("insert into BuildMetrics (build, name, unit, value, project, jobset, job, timestamp) values ($1, $2, $3, $4, $5, $6, $7, $8)",
pqxx::params{build->id,
txn.exec_params0
("insert into BuildMetrics (build, name, unit, value, project, jobset, job, timestamp) values ($1, $2, $3, $4, $5, $6, $7, $8)",
build->id,
metric.second.name,
metric.second.unit != "" ? std::make_optional(metric.second.unit) : std::nullopt,
metric.second.value,
build->projectName,
build->jobsetName,
build->jobName,
build->timestamp}).no_rows();
build->timestamp);
}
nrBuildsDone++;
@@ -512,7 +504,7 @@ bool State::checkCachedFailure(Step::ptr step, Connection & conn)
pqxx::work txn(conn);
for (auto & i : step->drv->outputsAndOptPaths(*localStore))
if (i.second.second)
if (!txn.exec("select 1 from FailedPaths where path = $1", pqxx::params{localStore->printStorePath(*i.second.second)}).empty())
if (!txn.exec_params("select 1 from FailedPaths where path = $1", localStore->printStorePath(*i.second.second)).empty())
return true;
return false;
}
@@ -550,182 +542,190 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
void State::dumpStatus(Connection & conn)
{
time_t now = time(0);
json statusJson = {
{"status", "up"},
{"time", time(0)},
{"uptime", now - startedAt},
{"pid", getpid()},
std::ostringstream out;
{"nrQueuedBuilds", builds.lock()->size()},
{"nrActiveSteps", activeSteps_.lock()->size()},
{"nrStepsBuilding", nrStepsBuilding.load()},
{"nrStepsCopyingTo", nrStepsCopyingTo.load()},
{"nrStepsWaitingForDownloadSlot", nrStepsWaitingForDownloadSlot.load()},
{"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
{"nrStepsWaiting", nrStepsWaiting.load()},
{"nrUnsupportedSteps", nrUnsupportedSteps.load()},
{"bytesSent", bytesSent.load()},
{"bytesReceived", bytesReceived.load()},
{"nrBuildsRead", nrBuildsRead.load()},
{"buildReadTimeMs", buildReadTimeMs.load()},
{"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
{"nrBuildsDone", nrBuildsDone.load()},
{"nrStepsStarted", nrStepsStarted.load()},
{"nrStepsDone", nrStepsDone.load()},
{"nrRetries", nrRetries.load()},
{"maxNrRetries", maxNrRetries.load()},
{"nrQueueWakeups", nrQueueWakeups.load()},
{"nrDispatcherWakeups", nrDispatcherWakeups.load()},
{"dispatchTimeMs", dispatchTimeMs.load()},
{"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
{"nrDbConnections", dbPool.count()},
{"nrActiveDbUpdates", nrActiveDbUpdates.load()},
};
{
JSONObject root(out);
time_t now = time(0);
root.attr("status", "up");
root.attr("time", time(0));
root.attr("uptime", now - startedAt);
root.attr("pid", getpid());
{
auto builds_(builds.lock());
root.attr("nrQueuedBuilds", builds_->size());
}
{
auto steps_(steps.lock());
for (auto i = steps_->begin(); i != steps_->end(); )
if (i->second.lock()) ++i; else i = steps_->erase(i);
statusJson["nrUnfinishedSteps"] = steps_->size();
root.attr("nrUnfinishedSteps", steps_->size());
}
{
auto runnable_(runnable.lock());
for (auto i = runnable_->begin(); i != runnable_->end(); )
if (i->lock()) ++i; else i = runnable_->erase(i);
statusJson["nrRunnableSteps"] = runnable_->size();
root.attr("nrRunnableSteps", runnable_->size());
}
root.attr("nrActiveSteps", activeSteps_.lock()->size());
root.attr("nrStepsBuilding", nrStepsBuilding);
root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
root.attr("nrStepsWaiting", nrStepsWaiting);
root.attr("nrUnsupportedSteps", nrUnsupportedSteps);
root.attr("bytesSent", bytesSent);
root.attr("bytesReceived", bytesReceived);
root.attr("nrBuildsRead", nrBuildsRead);
root.attr("buildReadTimeMs", buildReadTimeMs);
root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead);
root.attr("nrBuildsDone", nrBuildsDone);
root.attr("nrStepsStarted", nrStepsStarted);
root.attr("nrStepsDone", nrStepsDone);
root.attr("nrRetries", nrRetries);
root.attr("maxNrRetries", maxNrRetries);
if (nrStepsDone) {
statusJson["totalStepTime"] = totalStepTime.load();
statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
root.attr("totalStepTime", totalStepTime);
root.attr("totalStepBuildTime", totalStepBuildTime);
root.attr("avgStepTime", (float) totalStepTime / nrStepsDone);
root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone);
}
root.attr("nrQueueWakeups", nrQueueWakeups);
root.attr("nrDispatcherWakeups", nrDispatcherWakeups);
root.attr("dispatchTimeMs", dispatchTimeMs);
root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups);
root.attr("nrDbConnections", dbPool.count());
root.attr("nrActiveDbUpdates", nrActiveDbUpdates);
{
auto machines_json = json::object();
auto nested = root.object("machines");
auto machines_(machines.lock());
for (auto & i : *machines_) {
auto & m(i.second);
auto & s(m->state);
auto info(m->state->connectInfo.lock());
auto nested2 = nested.object(m->sshName);
nested2.attr("enabled", m->enabled);
json machine = {
{"enabled", m->enabled},
{"systemTypes", m->systemTypes},
{"supportedFeatures", m->supportedFeatures},
{"mandatoryFeatures", m->mandatoryFeatures},
{"nrStepsDone", s->nrStepsDone.load()},
{"currentJobs", s->currentJobs.load()},
{"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)},
{"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)},
{"consecutiveFailures", info->consecutiveFailures},
};
if (s->currentJobs == 0)
machine["idleSince"] = s->idleSince.load();
if (m->state->nrStepsDone) {
machine["totalStepTime"] = s->totalStepTime.load();
machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
{
auto list = nested2.list("systemTypes");
for (auto & s : m->systemTypes)
list.elem(s);
}
machines_json[m->storeUri.render()] = machine;
{
auto list = nested2.list("supportedFeatures");
for (auto & s : m->supportedFeatures)
list.elem(s);
}
{
auto list = nested2.list("mandatoryFeatures");
for (auto & s : m->mandatoryFeatures)
list.elem(s);
}
nested2.attr("currentJobs", s->currentJobs);
if (s->currentJobs == 0)
nested2.attr("idleSince", s->idleSince);
nested2.attr("nrStepsDone", s->nrStepsDone);
if (m->state->nrStepsDone) {
nested2.attr("totalStepTime", s->totalStepTime);
nested2.attr("totalStepBuildTime", s->totalStepBuildTime);
nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone);
nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone);
}
auto info(m->state->connectInfo.lock());
nested2.attr("disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil));
nested2.attr("lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure));
nested2.attr("consecutiveFailures", info->consecutiveFailures);
}
statusJson["machines"] = machines_json;
}
{
auto jobsets_json = json::object();
auto nested = root.object("jobsets");
auto jobsets_(jobsets.lock());
for (auto & jobset : *jobsets_) {
jobsets_json[jobset.first.first + ":" + jobset.first.second] = {
{"shareUsed", jobset.second->shareUsed()},
{"seconds", jobset.second->getSeconds()},
};
auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second);
nested2.attr("shareUsed", jobset.second->shareUsed());
nested2.attr("seconds", jobset.second->getSeconds());
}
statusJson["jobsets"] = jobsets_json;
}
{
auto machineTypesJson = json::object();
auto nested = root.object("machineTypes");
auto machineTypes_(machineTypes.lock());
for (auto & i : *machineTypes_) {
auto machineTypeJson = machineTypesJson[i.first] = {
{"runnable", i.second.runnable},
{"running", i.second.running},
};
auto nested2 = nested.object(i.first);
nested2.attr("runnable", i.second.runnable);
nested2.attr("running", i.second.running);
if (i.second.runnable > 0)
machineTypeJson["waitTime"] = i.second.waitTime.count() +
i.second.runnable * (time(0) - lastDispatcherCheck);
nested2.attr("waitTime", i.second.waitTime.count() +
i.second.runnable * (time(0) - lastDispatcherCheck));
if (i.second.running == 0)
machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive));
}
statusJson["machineTypes"] = machineTypesJson;
}
auto store = getDestStore();
auto nested = root.object("store");
auto & stats = store->getStats();
statusJson["store"] = {
{"narInfoRead", stats.narInfoRead.load()},
{"narInfoReadAverted", stats.narInfoReadAverted.load()},
{"narInfoMissing", stats.narInfoMissing.load()},
{"narInfoWrite", stats.narInfoWrite.load()},
{"narInfoCacheSize", stats.pathInfoCacheSize.load()},
{"narRead", stats.narRead.load()},
{"narReadBytes", stats.narReadBytes.load()},
{"narReadCompressedBytes", stats.narReadCompressedBytes.load()},
{"narWrite", stats.narWrite.load()},
{"narWriteAverted", stats.narWriteAverted.load()},
{"narWriteBytes", stats.narWriteBytes.load()},
{"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()},
{"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
{"narCompressionSavings",
stats.narWriteBytes
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
: 0.0},
{"narCompressionSpeed", // MiB/s
nested.attr("narInfoRead", stats.narInfoRead);
nested.attr("narInfoReadAverted", stats.narInfoReadAverted);
nested.attr("narInfoMissing", stats.narInfoMissing);
nested.attr("narInfoWrite", stats.narInfoWrite);
nested.attr("narInfoCacheSize", stats.pathInfoCacheSize);
nested.attr("narRead", stats.narRead);
nested.attr("narReadBytes", stats.narReadBytes);
nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
nested.attr("narWrite", stats.narWrite);
nested.attr("narWriteAverted", stats.narWriteAverted);
nested.attr("narWriteBytes", stats.narWriteBytes);
nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
nested.attr("narCompressionSavings",
stats.narWriteBytes
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
: 0.0);
nested.attr("narCompressionSpeed", // MiB/s
stats.narWriteCompressionTimeMs
? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0},
};
: 0.0);
#if NIX_WITH_S3_SUPPORT
auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
if (s3Store) {
auto nested2 = nested.object("s3");
auto & s3Stats = s3Store->getS3Stats();
auto jsonS3 = statusJson["s3"] = {
{"put", s3Stats.put.load()},
{"putBytes", s3Stats.putBytes.load()},
{"putTimeMs", s3Stats.putTimeMs.load()},
{"putSpeed",
s3Stats.putTimeMs
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0},
{"get", s3Stats.get.load()},
{"getBytes", s3Stats.getBytes.load()},
{"getTimeMs", s3Stats.getTimeMs.load()},
{"getSpeed",
s3Stats.getTimeMs
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0},
{"head", s3Stats.head.load()},
{"costDollarApprox",
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+ s3Stats.put / 1000.0 * 0.005 +
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
};
nested2.attr("put", s3Stats.put);
nested2.attr("putBytes", s3Stats.putBytes);
nested2.attr("putTimeMs", s3Stats.putTimeMs);
nested2.attr("putSpeed",
s3Stats.putTimeMs
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0);
nested2.attr("get", s3Stats.get);
nested2.attr("getBytes", s3Stats.getBytes);
nested2.attr("getTimeMs", s3Stats.getTimeMs);
nested2.attr("getSpeed",
s3Stats.getTimeMs
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0);
nested2.attr("head", s3Stats.head);
nested2.attr("costDollarApprox",
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+ s3Stats.put / 1000.0 * 0.005 +
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
}
#endif
}
{
auto mc = startDbUpdate();
pqxx::work txn(conn);
// FIXME: use PostgreSQL 9.5 upsert.
txn.exec("delete from SystemStatus where what = 'queue-runner'").no_rows();
txn.exec("insert into SystemStatus values ('queue-runner', $1)", pqxx::params{statusJson.dump()}).no_rows();
txn.exec("delete from SystemStatus where what = 'queue-runner'");
txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", out.str());
txn.exec("notify status_dumped");
txn.commit();
}
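Any client can request a fresh dump by raising the dump_status notification that the main loop below listens for; the runner then rewrites the 'queue-runner' row and raises status_dumped in return. A rough pqxx client sketch (connection string, timing, and error handling are illustrative, and it assumes a runner is actually listening):

#include <iostream>
#include <unistd.h>
#include <pqxx/pqxx>

int main()
{
    pqxx::connection conn("dbname=hydra");
    {
        pqxx::work txn(conn);
        txn.exec("notify dump_status");    // wake the queue runner
        txn.commit();
    }
    sleep(1);   // a real client would LISTEN for status_dumped instead
    pqxx::work txn(conn);
    auto row = txn.exec1(
        "select status from SystemStatus where what = 'queue-runner'");
    std::cout << row[0].as<std::string>() << std::endl;
}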
@@ -790,7 +790,7 @@ void State::unlock()
{
pqxx::work txn(*conn);
txn.exec("delete from SystemStatus where what = 'queue-runner'").no_rows();
txn.exec("delete from SystemStatus where what = 'queue-runner'");
txn.commit();
}
}
@@ -820,7 +820,7 @@ void State::run(BuildID buildOne)
<< metricsAddr << "/metrics (port " << exposerPort << ")"
<< std::endl;
Store::Config::Params localParams;
Store::Params localParams;
localParams["max-connections"] = "16";
localParams["max-connection-age"] = "600";
localStore = openStore(getEnv("NIX_REMOTE").value_or(""), localParams);
@@ -868,10 +868,11 @@ void State::run(BuildID buildOne)
pqxx::work txn(*conn);
for (auto & step : steps) {
printMsg(lvlError, "cleaning orphaned step %d of build %d", step.second, step.first);
txn.exec("update BuildSteps set busy = 0, status = $1 where build = $2 and stepnr = $3 and busy != 0",
pqxx::params{(int) bsAborted,
txn.exec_params0
("update BuildSteps set busy = 0, status = $1 where build = $2 and stepnr = $3 and busy != 0",
(int) bsAborted,
step.first,
step.second}).no_rows();
step.second);
}
txn.commit();
} catch (std::exception & e) {
@@ -901,17 +902,10 @@ void State::run(BuildID buildOne)
while (true) {
try {
auto conn(dbPool.get());
try {
receiver dumpStatus_(*conn, "dump_status");
while (true) {
conn->await_notification();
dumpStatus(*conn);
}
} catch (pqxx::broken_connection & connEx) {
printMsg(lvlError, "main thread: %s", connEx.what());
printMsg(lvlError, "main thread: Reconnecting in 10s");
conn.markBad();
sleep(10);
receiver dumpStatus_(*conn, "dump_status");
while (true) {
conn->await_notification();
dumpStatus(*conn);
}
} catch (std::exception & e) {
printMsg(lvlError, "main thread: %s", e.what());
@@ -956,6 +950,7 @@ int main(int argc, char * * argv)
});
settings.verboseBuild = true;
settings.lockCPU = false;
State state{metricsAddrOpt};
if (status)

View File

@@ -1,24 +0,0 @@
srcs = files(
'builder.cc',
'build-remote.cc',
'build-result.cc',
'dispatcher.cc',
'hydra-queue-runner.cc',
'nar-extractor.cc',
'queue-monitor.cc',
)
hydra_queue_runner = executable('hydra-queue-runner',
'hydra-queue-runner.cc',
srcs,
dependencies: [
libhydra_dep,
nix_util_dep,
nix_store_dep,
nix_main_dep,
pqxx_dep,
prom_cpp_core_dep,
prom_cpp_pull_dep,
],
install: true,
)

View File

@@ -1,51 +1,12 @@
#include "nar-extractor.hh"
#include <nix/util/archive.hh>
#include "archive.hh"
#include <unordered_set>
using namespace nix;
struct NarMemberConstructor : CreateRegularFileSink
{
NarMemberData & curMember;
HashSink hashSink = HashSink { HashAlgorithm::SHA256 };
std::optional<uint64_t> expectedSize;
NarMemberConstructor(NarMemberData & curMember)
: curMember(curMember)
{ }
void isExecutable() override
{
}
void preallocateContents(uint64_t size) override
{
expectedSize = size;
}
void operator () (std::string_view data) override
{
assert(expectedSize);
*curMember.fileSize += data.size();
hashSink(data);
if (curMember.contents) {
curMember.contents->append(data);
}
assert(curMember.fileSize <= expectedSize);
if (curMember.fileSize == expectedSize) {
auto [hash, len] = hashSink.finish();
assert(curMember.fileSize == len);
curMember.sha256 = hash;
}
}
};
struct Extractor : FileSystemObjectSink
struct Extractor : ParseSink
{
std::unordered_set<Path> filesToKeep {
"/nix-support/hydra-build-products",
@@ -54,40 +15,58 @@ struct Extractor : FileSystemObjectSink
};
NarMemberDatas & members;
std::filesystem::path prefix;
Path toKey(const CanonPath & path)
{
std::filesystem::path p = prefix;
// Conditional to avoid trailing slash
if (!path.isRoot()) p /= path.rel();
return p;
}
NarMemberData * curMember = nullptr;
Path prefix;
Extractor(NarMemberDatas & members, const Path & prefix)
: members(members), prefix(prefix)
{ }
void createDirectory(const CanonPath & path) override
void createDirectory(const Path & path) override
{
members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tDirectory });
members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tDirectory });
}
void createRegularFile(const CanonPath & path, std::function<void(CreateRegularFileSink &)> func) override
void createRegularFile(const Path & path) override
{
NarMemberConstructor nmc {
members.insert_or_assign(toKey(path), NarMemberData {
.type = SourceAccessor::Type::tRegular,
.fileSize = 0,
.contents = filesToKeep.count(path.abs()) ? std::optional("") : std::nullopt,
}).first->second,
};
func(nmc);
curMember = &members.insert_or_assign(prefix + path, NarMemberData {
.type = FSAccessor::Type::tRegular,
.fileSize = 0,
.contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
}).first->second;
}
void createSymlink(const CanonPath & path, const std::string & target) override
std::optional<uint64_t> expectedSize;
std::unique_ptr<HashSink> hashSink;
void preallocateContents(uint64_t size) override
{
members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tSymlink });
expectedSize = size;
hashSink = std::make_unique<HashSink>(htSHA256);
}
void receiveContents(std::string_view data) override
{
assert(expectedSize);
assert(curMember);
assert(hashSink);
*curMember->fileSize += data.size();
(*hashSink)(data);
if (curMember->contents) {
curMember->contents->append(data);
}
assert(curMember->fileSize <= expectedSize);
if (curMember->fileSize == expectedSize) {
auto [hash, len] = hashSink->finish();
assert(curMember->fileSize == len);
curMember->sha256 = hash;
hashSink.reset();
}
}
void createSymlink(const Path & path, const std::string & target) override
{
members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tSymlink });
}
};
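Extractor is private to this file; callers drive it through extractNarData, which (under the old API shown here) boils down to streaming a NAR through Nix's parseDump. A rough sketch of that wiring, assuming the classic parseDump(ParseSink &, Source &) from archive.hh:

#include "archive.hh"        // nix::parseDump
#include "nar-extractor.hh"

using namespace nix;

// Parse the NAR on `source`, recording type/size/sha256 for every member
// under `storePath` into `members`, and keeping the contents of the few
// nix-support files that Hydra inspects later.
static void extractAll(Source & source, const Path & storePath,
    NarMemberDatas & members)
{
    Extractor sink(members, storePath);
    parseDump(sink, source);
}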

View File

@@ -1,13 +1,13 @@
#pragma once
#include <nix/util/source-accessor.hh>
#include <nix/util/types.hh>
#include <nix/util/serialise.hh>
#include <nix/util/hash.hh>
#include "fs-accessor.hh"
#include "types.hh"
#include "serialise.hh"
#include "hash.hh"
struct NarMemberData
{
nix::SourceAccessor::Type type;
nix::FSAccessor::Type type;
std::optional<uint64_t> fileSize;
std::optional<std::string> contents;
std::optional<nix::Hash> sha256;

View File

@@ -1,11 +1,8 @@
#include "state.hh"
#include "hydra-build-result.hh"
#include <nix/store/globals.hh>
#include <nix/store/parsed-derivations.hh>
#include <nix/util/thread-pool.hh>
#include "globals.hh"
#include <cstring>
#include <signal.h>
using namespace nix;
@@ -13,74 +10,63 @@ using namespace nix;
void State::queueMonitor()
{
while (true) {
auto conn(dbPool.get());
try {
queueMonitorLoop(*conn);
} catch (pqxx::broken_connection & e) {
printMsg(lvlError, "queue monitor: %s", e.what());
printMsg(lvlError, "queue monitor: Reconnecting in 10s");
conn.markBad();
sleep(10);
queueMonitorLoop();
} catch (std::exception & e) {
printError("queue monitor: %s", e.what());
printMsg(lvlError, format("queue monitor: %1%") % e.what());
sleep(10); // probably a DB problem, so don't retry right away
}
}
}
void State::queueMonitorLoop(Connection & conn)
void State::queueMonitorLoop()
{
receiver buildsAdded(conn, "builds_added");
receiver buildsRestarted(conn, "builds_restarted");
receiver buildsCancelled(conn, "builds_cancelled");
receiver buildsDeleted(conn, "builds_deleted");
receiver buildsBumped(conn, "builds_bumped");
receiver jobsetSharesChanged(conn, "jobset_shares_changed");
auto conn(dbPool.get());
receiver buildsAdded(*conn, "builds_added");
receiver buildsRestarted(*conn, "builds_restarted");
receiver buildsCancelled(*conn, "builds_cancelled");
receiver buildsDeleted(*conn, "builds_deleted");
receiver buildsBumped(*conn, "builds_bumped");
receiver jobsetSharesChanged(*conn, "jobset_shares_changed");
auto destStore = getDestStore();
unsigned int lastBuildId = 0;
bool quit = false;
while (!quit) {
auto t_before_work = std::chrono::steady_clock::now();
localStore->clearPathInfoCache();
bool done = getQueuedBuilds(conn, destStore);
bool done = getQueuedBuilds(*conn, destStore, lastBuildId);
if (buildOne && buildOneDone) quit = true;
auto t_after_work = std::chrono::steady_clock::now();
prom.queue_monitor_time_spent_running.Increment(
std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
/* Sleep until we get a notification from the database about an
event. */
if (done && !quit) {
conn.await_notification();
conn->await_notification();
nrQueueWakeups++;
} else
conn.get_notifs();
conn->get_notifs();
if (auto lowestId = buildsAdded.get()) {
lastBuildId = std::min(lastBuildId, static_cast<unsigned>(std::stoul(*lowestId) - 1));
printMsg(lvlTalkative, "got notification: new builds added to the queue");
}
if (buildsRestarted.get()) {
printMsg(lvlTalkative, "got notification: builds restarted");
lastBuildId = 0; // check all builds
}
if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
processQueueChange(conn);
processQueueChange(*conn);
}
if (jobsetSharesChanged.get()) {
printMsg(lvlTalkative, "got notification: jobset shares changed");
processJobsetSharesChange(conn);
processJobsetSharesChange(*conn);
}
auto t_after_sleep = std::chrono::steady_clock::now();
prom.queue_monitor_time_spent_waiting.Increment(
std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
}
exit(0);
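The loop above blocks on PostgreSQL LISTEN/NOTIFY instead of polling: each `receiver` registers a channel, and `await_notification()` parks the thread until the database pushes an event. A standalone sketch of that pattern, assuming the pqxx 6.x `notification_receiver` API (which Hydra's `receiver` helper wraps); the connection string is made up:

```cpp
// Minimal LISTEN/NOTIFY consumer mirroring the queue monitor loop above.
#include <iostream>
#include <pqxx/pqxx>

struct BuildsAdded : pqxx::notification_receiver
{
    BuildsAdded(pqxx::connection_base & c)
        : pqxx::notification_receiver(c, "builds_added") { }

    void operator()(const std::string & payload, int /* backend pid */) override
    {
        // Hydra's trigger sends the lowest new build ID as the payload.
        std::cout << "builds added, lowest id: " << payload << "\n";
    }
};

int main()
{
    pqxx::connection conn("dbname=hydra");   // assumed connection string
    BuildsAdded receiver(conn);

    bool queueEmpty = true;
    while (true) {
        // ... process whatever is already in the queue here ...

        if (queueEmpty)
            conn.await_notification();   // block until a NOTIFY arrives
        else
            conn.get_notifs();           // non-blocking check; more work pending
    }
}
```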
@@ -94,31 +80,39 @@ struct PreviousFailure : public std::exception {
bool State::getQueuedBuilds(Connection & conn,
ref<Store> destStore)
ref<Store> destStore, unsigned int & lastBuildId)
{
prom.queue_checks_started.Increment();
printInfo("checking the queue for builds...");
printInfo("checking the queue for builds > %d...", lastBuildId);
/* Grab the queued builds from the database, but don't process
them yet (since we don't want a long-running transaction). */
std::vector<BuildID> newIDs;
std::unordered_map<BuildID, Build::ptr> newBuildsByID;
std::map<BuildID, Build::ptr> newBuildsByID;
std::multimap<StorePath, BuildID> newBuildsByPath;
unsigned int newLastBuildId = lastBuildId;
{
pqxx::work txn(conn);
auto res = txn.exec("select builds.id, builds.jobset_id, jobsets.project as project, "
auto res = txn.exec_params
("select builds.id, builds.jobset_id, jobsets.project as project, "
"jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
"globalPriority, priority from Builds "
"inner join jobsets on builds.jobset_id = jobsets.id "
"where finished = 0 order by globalPriority desc, random()");
"where builds.id > $1 and finished = 0 order by globalPriority desc, builds.id",
lastBuildId);
for (auto const & row : res) {
auto builds_(builds.lock());
BuildID id = row["id"].as<BuildID>();
if (buildOne && id != buildOne) continue;
if (id > newLastBuildId) {
newLastBuildId = id;
prom.queue_max_id.Set(id);
}
if (builds_->count(id)) continue;
auto build = std::make_shared<Build>(
@@ -148,20 +142,21 @@ bool State::getQueuedBuilds(Connection & conn,
createBuild = [&](Build::ptr build) {
prom.queue_build_loads.Increment();
printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
nrAdded++;
newBuildsByID.erase(build->id);
if (!localStore->isValidPath(build->drvPath)) {
/* Derivation has been GC'ed prematurely. */
printError("aborting GC'ed build %1%", build->id);
printMsg(lvlError, format("aborting GC'ed build %1%") % build->id);
if (!build->finishedInDB) {
auto mc = startDbUpdate();
pqxx::work txn(conn);
txn.exec("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3 where id = $1 and finished = 0",
pqxx::params{build->id,
txn.exec_params0
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3 where id = $1 and finished = 0",
build->id,
(int) bsAborted,
time(0)}).no_rows();
time(0));
txn.commit();
build->finishedInDB = true;
nrBuildsDone++;
@@ -191,33 +186,32 @@ bool State::getQueuedBuilds(Connection & conn,
derivation path, then by output path. */
BuildID propagatedFrom = 0;
auto res = txn.exec("select max(build) from BuildSteps where drvPath = $1 and startTime != 0 and stopTime != 0 and status = 1",
pqxx::params{localStore->printStorePath(ex.step->drvPath)}).one_row();
auto res = txn.exec_params1
("select max(build) from BuildSteps where drvPath = $1 and startTime != 0 and stopTime != 0 and status = 1",
localStore->printStorePath(ex.step->drvPath));
if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();
if (!propagatedFrom) {
for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(ex.step->drvPath, &*localStore)) {
constexpr std::string_view common = "select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where startTime != 0 and stopTime != 0 and status = 1";
auto res = optOutputPath
? txn.exec(
std::string { common } + " and path = $1",
pqxx::params{localStore->printStorePath(*optOutputPath)})
: txn.exec(
std::string { common } + " and drvPath = $1 and name = $2",
pqxx::params{localStore->printStorePath(ex.step->drvPath), outputName});
if (!res[0][0].is_null()) {
propagatedFrom = res[0][0].as<BuildID>();
break;
for (auto & i : ex.step->drv->outputsAndOptPaths(*localStore)) {
if (i.second.second) {
auto res = txn.exec_params
("select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where path = $1 and startTime != 0 and stopTime != 0 and status = 1",
localStore->printStorePath(*i.second.second));
if (!res[0][0].is_null()) {
propagatedFrom = res[0][0].as<BuildID>();
break;
}
}
}
}
createBuildStep(txn, 0, build->id, ex.step, "", bsCachedFailure, "", propagatedFrom);
txn.exec("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3, isCachedBuild = 1, notificationPendingSince = $3 "
txn.exec_params
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3, isCachedBuild = 1, notificationPendingSince = $3 "
"where id = $1 and finished = 0",
pqxx::params{build->id,
build->id,
(int) (ex.step->drvPath == build->drvPath ? bsFailed : bsDepFailed),
time(0)}).no_rows();
time(0));
notifyBuildFinished(txn, build->id, {});
txn.commit();
build->finishedInDB = true;
@@ -242,10 +236,12 @@ bool State::getQueuedBuilds(Connection & conn,
/* If we didn't get a step, it means the step's outputs are
all valid. So we mark this as a finished, cached build. */
if (!step) {
BuildOutput res = getBuildOutputCached(conn, destStore, build->drvPath);
auto drv = localStore->readDerivation(build->drvPath);
BuildOutput res = getBuildOutputCached(conn, destStore, drv);
for (auto & i : destStore->queryDerivationOutputMap(build->drvPath, &*localStore))
addRoot(i.second);
for (auto & i : drv.outputsAndOptPaths(*localStore))
if (i.second.second)
addRoot(*i.second.second);
{
auto mc = startDbUpdate();
@@ -296,7 +292,7 @@ bool State::getQueuedBuilds(Connection & conn,
try {
createBuild(build);
} catch (Error & e) {
e.addTrace({}, HintFmt("while loading build %d: ", build->id));
e.addTrace({}, hintfmt("while loading build %d: ", build->id));
throw;
}
@@ -306,7 +302,7 @@ bool State::getQueuedBuilds(Connection & conn,
/* Add the new runnable build steps to runnable and wake up
the builder threads. */
printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded);
for (auto & r : newRunnable)
makeRunnable(r);
@@ -316,13 +312,15 @@ bool State::getQueuedBuilds(Connection & conn,
/* Stop after a certain time to allow priority bumps to be
processed. */
if (std::chrono::system_clock::now() > start + std::chrono::seconds(60)) {
if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) {
prom.queue_checks_early_exits.Increment();
break;
}
}
}
prom.queue_checks_finished.Increment();
lastBuildId = newBuildsByID.empty() ? newLastBuildId : newBuildsByID.begin()->first - 1;
return newBuildsByID.empty();
}
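The `lastBuildId` parameter turns the full queue scan into a high-watermark scan: each pass only fetches rows with `id > lastBuildId`, a `builds_restarted` notification resets the watermark to zero, and a `builds_added` payload can wind it back to just below the lowest new ID. A self-contained sketch of the watermark logic (the real code additionally skips builds already held in memory):

```cpp
// High-watermark scan: only look at builds above the watermark,
// and wind it back when a notification reports a lower new id.
#include <algorithm>
#include <cstdio>
#include <map>

using BuildID = unsigned int;

static BuildID processNewBuilds(const std::map<BuildID, int> & queue, BuildID lastBuildId) {
    BuildID newLast = lastBuildId;
    // Stands in for: select ... from Builds where id > $1 and finished = 0
    for (auto it = queue.upper_bound(lastBuildId); it != queue.end(); ++it) {
        std::printf("loading build %u\n", it->first);
        newLast = std::max(newLast, it->first);
    }
    return newLast;
}

int main() {
    std::map<BuildID, int> queue{{3, 0}, {7, 0}, {9, 0}};
    BuildID watermark = 0;
    watermark = processNewBuilds(queue, watermark);   // sees 3, 7, 9
    queue.emplace(5, 0);                              // id 5 arrives late
    watermark = std::min(watermark, 5u - 1);          // wind back from NOTIFY payload
    watermark = processNewBuilds(queue, watermark);   // re-scans from 5 upward
}
```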
@@ -360,13 +358,13 @@ void State::processQueueChange(Connection & conn)
for (auto i = builds_->begin(); i != builds_->end(); ) {
auto b = currentIds.find(i->first);
if (b == currentIds.end()) {
printInfo("discarding cancelled build %1%", i->first);
printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first);
i = builds_->erase(i);
// FIXME: ideally we would interrupt active build steps here.
continue;
}
if (i->second->globalPriority < b->second) {
printInfo("priority of build %1% increased", i->first);
printMsg(lvlInfo, format("priority of build %1% increased") % i->first);
i->second->globalPriority = b->second;
i->second->propagatePriorities();
}
@@ -401,34 +399,6 @@ void State::processQueueChange(Connection & conn)
}
std::map<DrvOutput, std::optional<StorePath>> State::getMissingRemotePaths(
ref<Store> destStore,
const std::map<DrvOutput, std::optional<StorePath>> & paths)
{
Sync<std::map<DrvOutput, std::optional<StorePath>>> missing_;
ThreadPool tp;
for (auto & [output, maybeOutputPath] : paths) {
if (!maybeOutputPath) {
auto missing(missing_.lock());
missing->insert({output, maybeOutputPath});
} else {
tp.enqueue([&] {
if (!destStore->isValidPath(*maybeOutputPath)) {
auto missing(missing_.lock());
missing->insert({output, maybeOutputPath});
}
});
}
}
tp.process();
auto missing(missing_.lock());
return *missing;
}
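As the declaration's comment notes, `getMissingRemotePaths` exists because `isValidPath` against a remote binary cache can be slow per path; issuing the checks concurrently hides that latency. A standalone sketch with plain `std::thread` in place of Nix's `ThreadPool` and a fake `isValidPath`:

```cpp
// Check path validity concurrently, collecting misses under a mutex.
#include <mutex>
#include <set>
#include <string>
#include <thread>
#include <vector>

static bool isValidPath(const std::string & p) {
    return p.find("valid") != std::string::npos;   // stand-in for a store query
}

int main() {
    std::vector<std::string> paths{"/nix/store/a-valid", "/nix/store/b-missing"};
    std::set<std::string> missing;
    std::mutex missingLock;

    std::vector<std::thread> workers;
    for (auto & p : paths)
        workers.emplace_back([&missing, &missingLock, &p] {
            if (!isValidPath(p)) {                 // possibly slow remote query
                std::lock_guard<std::mutex> lock(missingLock);
                missing.insert(p);
            }
        });
    for (auto & t : workers) t.join();
}
```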
Step::ptr State::createStep(ref<Store> destStore,
Connection & conn, Build::ptr build, const StorePath & drvPath,
Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
@@ -487,24 +457,17 @@ Step::ptr State::createStep(ref<Store> destStore,
it's not runnable yet, and other threads won't make it
runnable while step->created == false. */
step->drv = std::make_unique<Derivation>(localStore->readDerivation(drvPath));
{
try {
step->drvOptions = std::make_unique<DerivationOptions>(
DerivationOptions::fromStructuredAttrs(
step->drv->env,
step->drv->structuredAttrs ? &*step->drv->structuredAttrs : nullptr));
} catch (Error & e) {
e.addTrace({}, "while parsing derivation '%s'", localStore->printStorePath(drvPath));
throw;
}
}
step->parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *step->drv);
step->preferLocalBuild = step->drvOptions->willBuildLocally(*localStore, *step->drv);
step->preferLocalBuild = step->parsedDrv->willBuildLocally(*localStore);
step->isDeterministic = getOr(step->drv->env, "isDetermistic", "0") == "1";
step->systemType = step->drv->platform;
{
StringSet features = step->requiredSystemFeatures = step->drvOptions->getRequiredSystemFeatures(*step->drv);
auto i = step->drv->env.find("requiredSystemFeatures");
StringSet features;
if (i != step->drv->env.end())
features = step->requiredSystemFeatures = tokenizeString<std::set<std::string>>(i->second);
if (step->preferLocalBuild)
features.insert("local");
if (!features.empty()) {
@@ -518,40 +481,26 @@ Step::ptr State::createStep(ref<Store> destStore,
throw PreviousFailure{step};
/* Are all outputs valid? */
auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
std::map<DrvOutput, std::optional<StorePath>> paths;
for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
auto outputHash = outputHashes.at(outputName);
paths.insert({{outputHash, outputName}, maybeOutputPath});
}
auto missing = getMissingRemotePaths(destStore, paths);
bool valid = missing.empty();
bool valid = true;
DerivationOutputs missing;
for (auto & i : step->drv->outputs)
if (!destStore->isValidPath(*i.second.path(*localStore, step->drv->name, i.first))) {
valid = false;
missing.insert_or_assign(i.first, i.second);
}
/* Try to copy the missing paths from the local store or from
substitutes. */
if (!missing.empty()) {
size_t avail = 0;
for (auto & [i, pathOpt] : missing) {
// If we don't know the output path from the destination
// store, see if the local store can tell us.
if (/* localStore != destStore && */ !pathOpt && experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
if (auto maybeRealisation = localStore->queryRealisation(i))
pathOpt = maybeRealisation->outPath;
if (!pathOpt) {
// No hope of getting the store object if we don't know
// the path.
continue;
}
auto & path = *pathOpt;
if (/* localStore != destStore && */ localStore->isValidPath(path))
for (auto & i : missing) {
auto path = i.second.path(*localStore, step->drv->name, i.first);
if (/* localStore != destStore && */ localStore->isValidPath(*path))
avail++;
else if (useSubstitutes) {
SubstitutablePathInfos infos;
localStore->querySubstitutablePathInfos({{path, {}}}, infos);
localStore->querySubstitutablePathInfos({{*path, {}}}, infos);
if (infos.size() == 1)
avail++;
}
@@ -559,29 +508,26 @@ Step::ptr State::createStep(ref<Store> destStore,
if (missing.size() == avail) {
valid = true;
for (auto & [i, pathOpt] : missing) {
// If we found everything, then we should know the path
// to every missing store object now.
assert(pathOpt);
auto & path = *pathOpt;
for (auto & i : missing) {
auto path = i.second.path(*localStore, step->drv->name, i.first);
try {
time_t startTime = time(0);
if (localStore->isValidPath(path))
if (localStore->isValidPath(*path))
printInfo("copying output %1% of %2% from local store",
localStore->printStorePath(path),
localStore->printStorePath(*path),
localStore->printStorePath(drvPath));
else {
printInfo("substituting output %1% of %2%",
localStore->printStorePath(path),
localStore->printStorePath(*path),
localStore->printStorePath(drvPath));
localStore->ensurePath(path);
localStore->ensurePath(*path);
// FIXME: should copy directly from substituter to destStore.
}
copyClosure(*localStore, *destStore,
StorePathSet { path },
StorePathSet { *path },
NoRepair, CheckSigs, NoSubstitute);
time_t stopTime = time(0);
@@ -589,13 +535,13 @@ Step::ptr State::createStep(ref<Store> destStore,
{
auto mc = startDbUpdate();
pqxx::work txn(conn);
createSubstitutionStep(txn, startTime, stopTime, build, drvPath, *(step->drv), "out", path);
createSubstitutionStep(txn, startTime, stopTime, build, drvPath, "out", *path);
txn.commit();
}
} catch (Error & e) {
printError("while copying/substituting output %s of %s: %s",
localStore->printStorePath(path),
localStore->printStorePath(*path),
localStore->printStorePath(drvPath),
e.what());
valid = false;
@@ -615,7 +561,7 @@ Step::ptr State::createStep(ref<Store> destStore,
printMsg(lvlDebug, "creating build step %1%", localStore->printStorePath(drvPath));
/* Create steps for the dependencies. */
for (auto & i : step->drv->inputDrvs.map) {
for (auto & i : step->drv->inputDrvs) {
auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
if (dep) {
auto step_(step->state.lock());
@@ -650,8 +596,10 @@ Jobset::ptr State::createJobset(pqxx::work & txn,
if (i != jobsets_->end()) return i->second;
}
auto res = txn.exec("select schedulingShares from Jobsets where id = $1",
pqxx::params{jobsetID}).one_row();
auto res = txn.exec_params1
("select schedulingShares from Jobsets where id = $1",
jobsetID);
if (res.empty()) throw Error("missing jobset - can't happen");
auto shares = res["schedulingShares"].as<unsigned int>();
@@ -659,10 +607,11 @@ Jobset::ptr State::createJobset(pqxx::work & txn,
jobset->setShares(shares);
/* Load the build steps from the last 24 hours. */
auto res2 = txn.exec("select s.startTime, s.stopTime from BuildSteps s join Builds b on build = id "
auto res2 = txn.exec_params
("select s.startTime, s.stopTime from BuildSteps s join Builds b on build = id "
"where s.startTime is not null and s.stopTime > $1 and jobset_id = $2",
pqxx::params{time(0) - Jobset::schedulingWindow * 10,
jobsetID});
time(0) - Jobset::schedulingWindow * 10,
jobsetID);
for (auto const & row : res2) {
time_t startTime = row["startTime"].as<time_t>();
time_t stopTime = row["stopTime"].as<time_t>();
@@ -691,22 +640,21 @@ void State::processJobsetSharesChange(Connection & conn)
}
BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::StorePath & drvPath)
BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::Derivation & drv)
{
auto derivationOutputs = destStore->queryDerivationOutputMap(drvPath, &*localStore);
{
pqxx::work txn(conn);
for (auto & [name, output] : derivationOutputs) {
auto r = txn.exec("select id, buildStatus, releaseName, closureSize, size from Builds b "
for (auto & [name, output] : drv.outputsAndOptPaths(*localStore)) {
auto r = txn.exec_params
("select id, buildStatus, releaseName, closureSize, size from Builds b "
"join BuildOutputs o on b.id = o.build "
"where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
pqxx::params{localStore->printStorePath(output)});
localStore->printStorePath(*output.second));
if (r.empty()) continue;
BuildID id = r[0][0].as<BuildID>();
printInfo("reusing build %d", id);
printMsg(lvlInfo, format("reusing build %d") % id);
BuildOutput res;
res.failed = r[0][1].as<int>() == bsFailedWithOutput;
@@ -714,8 +662,9 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
res.closureSize = r[0][3].is_null() ? 0 : r[0][3].as<uint64_t>();
res.size = r[0][4].is_null() ? 0 : r[0][4].as<uint64_t>();
auto products = txn.exec("select type, subtype, fileSize, sha256hash, path, name, defaultPath from BuildProducts where build = $1 order by productnr",
pqxx::params{id});
auto products = txn.exec_params
("select type, subtype, fileSize, sha256hash, path, name, defaultPath from BuildProducts where build = $1 order by productnr",
id);
for (auto row : products) {
BuildProduct product;
@@ -728,7 +677,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
product.fileSize = row[2].as<off_t>();
}
if (!row[3].is_null())
product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashAlgorithm::SHA256);
product.sha256hash = Hash::parseAny(row[3].as<std::string>(), htSHA256);
if (!row[4].is_null())
product.path = row[4].as<std::string>();
product.name = row[5].as<std::string>();
@@ -737,8 +686,9 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
res.products.emplace_back(product);
}
auto metrics = txn.exec("select name, unit, value from BuildMetrics where build = $1",
pqxx::params{id});
auto metrics = txn.exec_params
("select name, unit, value from BuildMetrics where build = $1",
id);
for (auto row : metrics) {
BuildMetric metric;
@@ -754,5 +704,5 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
}
NarMemberDatas narMembers;
return getBuildOutput(destStore, narMembers, derivationOutputs);
return getBuildOutput(destStore, narMembers, drv);
}


@@ -7,7 +7,6 @@
#include <memory>
#include <queue>
#include <regex>
#include <semaphore>
#include <prometheus/counter.h>
#include <prometheus/gauge.h>
@@ -15,19 +14,13 @@
#include "db.hh"
#include <nix/store/derivations.hh>
#include <nix/store/derivation-options.hh>
#include <nix/store/pathlocks.hh>
#include <nix/util/pool.hh>
#include <nix/store/build-result.hh>
#include <nix/store/store-api.hh>
#include <nix/util/sync.hh>
#include "parsed-derivations.hh"
#include "pathlocks.hh"
#include "pool.hh"
#include "build-result.hh"
#include "store-api.hh"
#include "sync.hh"
#include "nar-extractor.hh"
#include <nix/store/serve-protocol.hh>
#include <nix/store/serve-protocol-impl.hh>
#include <nix/store/serve-protocol-connection.hh>
#include <nix/store/machines.hh>
#include <nix/store/globals.hh>
typedef unsigned int BuildID;
@@ -61,7 +54,6 @@ typedef enum {
ssConnecting = 10,
ssSendingInputs = 20,
ssBuilding = 30,
ssWaitingForLocalSlot = 35,
ssReceivingOutputs = 40,
ssPostProcessing = 50,
} StepState;
@@ -86,8 +78,6 @@ struct RemoteResult
{
return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
}
void updateWithBuildResult(const nix::BuildResult &);
};
@@ -172,8 +162,8 @@ struct Step
nix::StorePath drvPath;
std::unique_ptr<nix::Derivation> drv;
std::unique_ptr<nix::DerivationOptions> drvOptions;
nix::StringSet requiredSystemFeatures;
std::unique_ptr<nix::ParsedDerivation> parsedDrv;
std::set<std::string> requiredSystemFeatures;
bool preferLocalBuild;
bool isDeterministic;
std::string systemType; // concatenation of drv.platform and requiredSystemFeatures
@@ -241,10 +231,18 @@ void getDependents(Step::ptr step, std::set<Build::ptr> & builds, std::set<Step:
void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);
struct Machine : nix::Machine
struct Machine
{
typedef std::shared_ptr<Machine> ptr;
bool enabled{true};
std::string sshName, sshKey;
std::set<std::string> systemTypes, supportedFeatures, mandatoryFeatures;
unsigned int maxJobs = 1;
float speedFactor = 1.0;
std::string sshPublicHostKey;
struct State {
typedef std::shared_ptr<State> ptr;
counter currentJobs{0};
@@ -294,13 +292,11 @@ struct Machine : nix::Machine
return true;
}
bool isLocalhost() const;
// A connection to a machine
struct Connection : nix::ServeProto::BasicClientConnection {
// Backpointer to the machine
ptr machine;
};
bool isLocalhost()
{
std::regex r("^(ssh://|ssh-ng://)?localhost$");
return std::regex_search(sshName, r);
}
};
@@ -354,13 +350,9 @@ private:
/* The build machines. */
std::mutex machinesReadyLock;
typedef std::map<nix::StoreReference::Variant, Machine::ptr> Machines;
typedef std::map<std::string, Machine::ptr> Machines;
nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr
/* Throttler for CPU-bound local work. */
static constexpr unsigned int maxSupportedLocalWorkers = 1024;
std::counting_semaphore<maxSupportedLocalWorkers> localWorkThrottler;
/* Various stats. */
time_t startedAt;
counter nrBuildsRead{0};
@@ -370,7 +362,6 @@ private:
counter nrStepsDone{0};
counter nrStepsBuilding{0};
counter nrStepsCopyingTo{0};
counter nrStepsWaitingForDownloadSlot{0};
counter nrStepsCopyingFrom{0};
counter nrStepsWaiting{0};
counter nrUnsupportedSteps{0};
@@ -401,6 +392,7 @@ private:
struct MachineReservation
{
typedef std::shared_ptr<MachineReservation> ptr;
State & state;
Step::ptr step;
Machine::ptr machine;
@@ -438,7 +430,7 @@ private:
/* How often the build steps of a jobset should be repeated in
order to detect non-determinism. */
std::map<std::pair<std::string, std::string>, size_t> jobsetRepeats;
std::map<std::pair<std::string, std::string>, unsigned int> jobsetRepeats;
bool uploadLogsToBinaryCache;
@@ -458,12 +450,7 @@ private:
prometheus::Counter& queue_steps_created;
prometheus::Counter& queue_checks_early_exits;
prometheus::Counter& queue_checks_finished;
prometheus::Counter& dispatcher_time_spent_running;
prometheus::Counter& dispatcher_time_spent_waiting;
prometheus::Counter& queue_monitor_time_spent_running;
prometheus::Counter& queue_monitor_time_spent_waiting;
prometheus::Gauge& queue_max_id;
PromMetrics();
};
@@ -498,28 +485,23 @@ private:
const std::string & machine);
int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
Build::ptr build, const nix::StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const nix::StorePath & storePath);
Build::ptr build, const nix::StorePath & drvPath, const std::string & outputName, const nix::StorePath & storePath);
void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);
void queueMonitor();
void queueMonitorLoop(Connection & conn);
void queueMonitorLoop();
/* Check the queue for new builds. */
bool getQueuedBuilds(Connection & conn, nix::ref<nix::Store> destStore);
bool getQueuedBuilds(Connection & conn,
nix::ref<nix::Store> destStore, unsigned int & lastBuildId);
/* Handle cancellation, deletion and priority bumps. */
void processQueueChange(Connection & conn);
BuildOutput getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore,
const nix::StorePath & drvPath);
/* Returns paths missing from the remote store. Paths are processed in
* parallel to work around the possible latency of remote stores. */
std::map<nix::DrvOutput, std::optional<nix::StorePath>> getMissingRemotePaths(
nix::ref<nix::Store> destStore,
const std::map<nix::DrvOutput, std::optional<nix::StorePath>> & paths);
const nix::Derivation & drv);
Step::ptr createStep(nix::ref<nix::Store> store,
Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
@@ -550,19 +532,19 @@ private:
void abortUnsupported();
void builder(std::unique_ptr<MachineReservation> reservation);
void builder(MachineReservation::ptr reservation);
/* Perform the given build step. Return true if the step is to be
retried. */
enum StepResult { sDone, sRetry, sMaybeCancelled };
StepResult doBuildStep(nix::ref<nix::Store> destStore,
std::unique_ptr<MachineReservation> reservation,
MachineReservation::ptr reservation,
std::shared_ptr<ActiveStep> activeStep);
void buildRemote(nix::ref<nix::Store> destStore,
std::unique_ptr<MachineReservation> reservation,
Machine::ptr machine, Step::ptr step,
const nix::ServeProto::BuildOptions & buildOptions,
unsigned int maxSilentTime, unsigned int buildTimeout,
unsigned int repeats,
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
std::function<void(StepState)> updateStep,
NarMemberDatas & narMembers);
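Among the `State` changes above is `localWorkThrottler`, a C++20 `std::counting_semaphore` capping how many builder threads may do CPU-bound local work at once (the new `ssWaitingForLocalSlot` step state covers the wait). A minimal sketch of that throttle, assuming four slots:

```cpp
// Semaphore-based throttle for CPU-bound local work. Requires C++20.
#include <semaphore>
#include <thread>
#include <vector>

static constexpr unsigned maxSupportedLocalWorkers = 1024;
static std::counting_semaphore<maxSupportedLocalWorkers> localWorkThrottler(4); // assume 4 slots

static void doLocalWork() {
    localWorkThrottler.acquire();   // wait for a free slot (ssWaitingForLocalSlot)
    // ... hash / compress build outputs here ...
    localWorkThrottler.release();
}

int main() {
    std::vector<std::jthread> steps;
    for (int i = 0; i < 16; ++i)
        steps.emplace_back(doLocalWork);   // only 4 run the local work at once
}
```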


@@ -4,6 +4,7 @@ use strict;
use warnings;
use base 'Hydra::Base::Controller::REST';
use List::SomeUtils qw(any);
use Nix::Store;
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
@@ -29,7 +30,7 @@ sub getChannelData {
my $outputs = {};
foreach my $output (@outputs) {
my $outPath = $output->get_column("outpath");
next if $checkValidity && !$MACHINE_LOCAL_STORE->isValidPath($outPath);
next if $checkValidity && !isValidPath($outPath);
$outputs->{$output->get_column("outname")} = $outPath;
push @storePaths, $outPath;
# Put the system type in the manifest (for top-level


@@ -95,7 +95,6 @@ sub get_legacy_ldap_config {
"hydra_bump-to-front" => [ "bump-to-front" ],
"hydra_cancel-build" => [ "cancel-build" ],
"hydra_create-projects" => [ "create-projects" ],
"hydra_eval-jobset" => [ "eval-jobset" ],
"hydra_restart-jobs" => [ "restart-jobs" ],
},
};
@@ -160,7 +159,6 @@ sub valid_roles {
"bump-to-front",
"cancel-build",
"create-projects",
"eval-jobset",
"restart-jobs",
];
}


@@ -12,9 +12,6 @@ use DateTime;
use Digest::SHA qw(sha256_hex);
use Text::Diff;
use IPC::Run qw(run);
use Digest::SHA qw(hmac_sha256_hex);
use String::Compare::ConstantTime qw(equals);
use IPC::Run3;
sub api : Chained('/') PathPart('api') CaptureArgs(0) {
@@ -219,13 +216,8 @@ sub scmdiff : Path('/api/scmdiff') Args(0) {
} elsif ($type eq "git") {
my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);
die if ! -d $clonePath;
my ($stdout1, $stderr1);
run3(['git', '-C', $clonePath, 'log', "$rev1..$rev2"], \undef, \$stdout1, \$stderr1);
$diff .= $stdout1 if $? == 0;
my ($stdout2, $stderr2);
run3(['git', '-C', $clonePath, 'diff', "$rev1..$rev2"], \undef, \$stdout2, \$stderr2);
$diff .= $stdout2 if $? == 0;
$diff .= `(cd $clonePath; git --git-dir .git log $rev1..$rev2)`;
$diff .= `(cd $clonePath; git --git-dir .git diff $rev1..$rev2)`;
}
$c->stash->{'plain'} = { data => (scalar $diff) || " " };
@@ -247,8 +239,6 @@ sub triggerJobset {
sub push : Chained('api') PathPart('push') Args(0) {
my ($self, $c) = @_;
requirePost($c);
$c->{stash}->{json}->{jobsetsTriggered} = [];
my $force = exists $c->request->query_params->{force};
@@ -256,24 +246,19 @@ sub push : Chained('api') PathPart('push') Args(0) {
foreach my $s (@jobsets) {
my ($p, $j) = parseJobsetName($s);
my $jobset = $c->model('DB::Jobsets')->find($p, $j);
requireEvalJobsetPrivileges($c, $jobset->project);
next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
triggerJobset($self, $c, $jobset, $force);
}
my @repos = split /,/, ($c->request->query_params->{repos} // "");
foreach my $r (@repos) {
my @jobsets = $c->model('DB::Jobsets')->search(
triggerJobset($self, $c, $_, $force) foreach $c->model('DB::Jobsets')->search(
{ 'project.enabled' => 1, 'me.enabled' => 1 },
{
join => 'project',
where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ],
order_by => 'me.id DESC'
});
foreach my $jobset (@jobsets) {
requireEvalJobsetPrivileges($c, $jobset->project);
triggerJobset($self, $c, $jobset, $force)
}
}
$self->status_ok(
@@ -282,84 +267,13 @@ sub push : Chained('api') PathPart('push') Args(0) {
);
}
sub verifyWebhookSignature {
my ($c, $platform, $header_name, $signature_prefix) = @_;
# Get secrets from config
my $webhook_config = $c->config->{webhooks} // {};
my $platform_config = $webhook_config->{$platform} // {};
my $secrets = $platform_config->{secret};
# Normalize to array
$secrets = [] unless defined $secrets;
$secrets = [$secrets] unless ref($secrets) eq 'ARRAY';
# Trim whitespace from secrets
my @secrets = grep { defined && length } map { s/^\s+|\s+$//gr } @$secrets;
if (@secrets) {
my $signature = $c->request->header($header_name);
if (!$signature) {
$c->log->warn("Webhook authentication failed for $platform: Missing signature from IP " . $c->request->address);
$c->response->status(401);
$c->stash->{json} = { error => "Missing webhook signature" };
$c->forward('View::JSON');
return 0;
}
# Get the raw body content from the buffered PSGI input
# For JSON requests, Catalyst will have already read and buffered the body
my $input = $c->request->env->{'psgi.input'};
$input->seek(0, 0);
local $/;
my $payload = <$input>;
$input->seek(0, 0); # Reset for any other consumers
unless (defined $payload && length $payload) {
$c->log->warn("Webhook authentication failed for $platform: Empty request body from IP " . $c->request->address);
$c->response->status(400);
$c->stash->{json} = { error => "Empty request body" };
$c->forward('View::JSON');
return 0;
}
my $valid = 0;
for my $secret (@secrets) {
my $expected = $signature_prefix . hmac_sha256_hex($payload, $secret);
if (equals($signature, $expected)) {
$valid = 1;
last;
}
}
if (!$valid) {
$c->log->warn("Webhook authentication failed for $platform: Invalid signature from IP " . $c->request->address);
$c->response->status(401);
$c->stash->{json} = { error => "Invalid webhook signature" };
$c->forward('View::JSON');
return 0;
}
return 1;
} else {
$c->log->warn("Webhook authentication failed for $platform: Unable to validate signature from IP " . $c->request->address . " because no secrets are configured");
$c->response->status(401);
$c->stash->{json} = { error => "Invalid webhook signature" };
$c->forward('View::JSON');
return 0;
}
}
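`verifyWebhookSignature` recomputes the HMAC-SHA256 of the raw request body for each configured secret and compares it to the received header in constant time. For illustration, the same check in C++ using OpenSSL; the function and parameter names here are invented for the sketch:

```cpp
// HMAC-SHA256 webhook verification: hex-encode the MAC with the
// platform prefix and compare in constant time, accepting any of
// several configured secrets, as the Perl code above does.
#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <string>
#include <vector>

static std::string hmacSha256Hex(const std::string & payload, const std::string & secret) {
    unsigned char mac[EVP_MAX_MD_SIZE];
    unsigned int len = 0;
    HMAC(EVP_sha256(),
         secret.data(), (int) secret.size(),
         (const unsigned char *) payload.data(), payload.size(),
         mac, &len);
    static const char hex[] = "0123456789abcdef";
    std::string out;
    for (unsigned i = 0; i < len; ++i) {
        out += hex[mac[i] >> 4];
        out += hex[mac[i] & 0xf];
    }
    return out;
}

static bool verifySignature(const std::string & header,   // e.g. "sha256=ab12..."
                            const std::string & prefix,   // "sha256=" for GitHub, "" for Gitea
                            const std::string & payload,
                            const std::vector<std::string> & secrets) {
    for (auto & secret : secrets) {
        std::string expected = prefix + hmacSha256Hex(payload, secret);
        if (header.size() == expected.size()
            && CRYPTO_memcmp(header.data(), expected.data(), expected.size()) == 0)
            return true;   // constant-time match against this secret
    }
    return false;
}
```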
sub push_github : Chained('api') PathPart('push-github') Args(0) {
my ($self, $c) = @_;
$c->{stash}->{json}->{jobsetsTriggered} = [];
return unless verifyWebhookSignature($c, 'github', 'X-Hub-Signature-256', 'sha256=');
my $in = $c->request->{data};
my $owner = ($in->{repository}->{owner}->{name} // $in->{repository}->{owner}->{login}) or die;
my $owner = $in->{repository}->{owner}->{name} or die;
my $repo = $in->{repository}->{name} or die;
print STDERR "got push from GitHub repository $owner/$repo\n";
@@ -371,26 +285,6 @@ sub push_github : Chained('api') PathPart('push-github') Args(0) {
$c->response->body("");
}
sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) {
my ($self, $c) = @_;
$c->{stash}->{json}->{jobsetsTriggered} = [];
# Note: Gitea doesn't use a sha256= prefix
return unless verifyWebhookSignature($c, 'gitea', 'X-Gitea-Signature', '');
my $in = $c->request->{data};
my $url = $in->{repository}->{clone_url} or die;
$url =~ s/.git$//;
print STDERR "got push from Gitea repository $url\n";
triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search(
{ 'project.enabled' => 1, 'me.enabled' => 1 },
{ join => 'project'
, where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ]
});
$c->response->body("");
}
1;


@@ -10,12 +10,11 @@ use File::Basename;
use File::LibMagic;
use File::stat;
use Data::Dump qw(dump);
use Nix::Store;
use Nix::Config;
use List::SomeUtils qw(all);
use Encode;
use JSON::PP;
use IPC::Run qw(run);
use IPC::Run3;
use WWW::Form::UrlEncoded::PP qw();
use feature 'state';
@@ -79,16 +78,14 @@ sub build_GET {
$c->stash->{template} = 'build.tt';
$c->stash->{isLocalStore} = isLocalStore();
# XXX: If the derivation is content-addressed then this will always return
# false because `$_->path` will be empty
$c->stash->{available} =
$c->stash->{isLocalStore}
? all { $_->path && $MACHINE_LOCAL_STORE->isValidPath($_->path) } $build->buildoutputs->all
? all { isValidPath($_->path) } $build->buildoutputs->all
: 1;
$c->stash->{drvAvailable} = $MACHINE_LOCAL_STORE->isValidPath($build->drvpath);
$c->stash->{drvAvailable} = isValidPath $build->drvpath;
if ($build->finished && $build->iscachedbuild) {
my $path = ($build->buildoutputs)[0]->path or undef;
my $path = ($build->buildoutputs)[0]->path or die;
my $cachedBuildStep = findBuildStepByOutPath($self, $c, $path);
if (defined $cachedBuildStep) {
$c->stash->{cachedBuild} = $cachedBuildStep->build;
@@ -142,7 +139,7 @@ sub view_nixlog : Chained('buildChain') PathPart('nixlog') {
$c->stash->{step} = $step;
my $drvPath = $step->drvpath;
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
showLog($c, $mode, $log_uri);
}
@@ -151,7 +148,7 @@ sub view_log : Chained('buildChain') PathPart('log') {
my ($self, $c, $mode) = @_;
my $drvPath = $c->stash->{build}->drvpath;
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
showLog($c, $mode, $log_uri);
}
@@ -212,7 +209,7 @@ sub checkPath {
sub serveFile {
my ($c, $path) = @_;
my $res = runCommand(cmd => ["nix", "--experimental-features", "nix-command",
my $res = run(cmd => ["nix", "--experimental-features", "nix-command",
"ls-store", "--store", getStoreUri(), "--json", "$path"]);
if ($res->{status}) {
@@ -236,25 +233,14 @@ sub serveFile {
}
elsif ($ls->{type} eq "regular") {
# Serve the hosted data as its own origin so that it can't become a
# giant XSS hole.
$c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');
$c->stash->{'plain'} = { data => readIntoSocket(cmd => ["nix", "--experimental-features", "nix-command",
$c->stash->{'plain'} = { data => grab(cmd => ["nix", "--experimental-features", "nix-command",
"store", "cat", "--store", getStoreUri(), "$path"]) };
# Detect MIME type.
my $type = "text/plain";
if ($path =~ /.*\.(\S{1,})$/xms) {
my $ext = $1;
my $mimeTypes = MIME::Types->new(only_complete => 1);
my $t = $mimeTypes->mimeTypeOf($ext);
$type = ref $t ? $t->type : $t if $t;
} else {
state $magic = File::LibMagic->new(follow_symlinks => 1);
my $info = $magic->info_from_filename($path);
$type = $info->{mime_with_encoding};
}
state $magic = File::LibMagic->new(follow_symlinks => 1);
my $info = $magic->info_from_filename($path);
my $type = $info->{mime_with_encoding};
$c->response->content_type($type);
$c->forward('Hydra::View::Plain');
}
@@ -312,7 +298,7 @@ sub output : Chained('buildChain') PathPart Args(1) {
error($c, "This build is not finished yet.") unless $build->finished;
my $output = $build->buildoutputs->find({name => $outputName});
notFound($c, "This build has no output named $outputName") unless defined $output;
gone($c, "Output is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($output->path);
gone($c, "Output is no longer available.") unless isValidPath $output->path;
$c->response->header('Content-Disposition', "attachment; filename=\"build-${\$build->id}-${\$outputName}.nar.bz2\"");
$c->stash->{current_view} = 'NixNAR';
@@ -350,21 +336,19 @@ sub contents : Chained('buildChain') PathPart Args(1) {
notFound($c, "Product $path has disappeared.") unless -e $path;
# Sanitize $path to prevent shell injection attacks.
$path =~ /^\/[\/[A-Za-z0-9_\-\.=+:]+$/ or die "Filename contains illegal characters.\n";
# FIXME: don't use shell invocations below.
# FIXME: use nix store cat
my $res;
if ($product->type eq "nix-build" && -d $path) {
# FIXME: use nix ls-store -R --json
# We need to use a pipe between find and xargs, so we'll use IPC::Run
my $error;
# Run find with absolute path and post-process to get relative paths
my $success = run(['find', $path, '-print0'], '|', ['xargs', '-0', 'ls', '-ld', '--'], \$res, \$error);
error($c, "`find $path -print0 | xargs -0 ls -ld --' error: $error") unless $success;
# Strip the base path to show relative paths
my $escaped_path = quotemeta($path);
$res =~ s/^(.*\s)$escaped_path(\/|$)/$1.$2/mg;
$res = `cd '$path' && find . -print0 | xargs -0 ls -ld --`;
error($c, "`ls -lR' error: $?") if $? != 0;
#my $baseuri = $c->uri_for('/build', $c->stash->{build}->id, 'download', $product->productnr);
#$baseuri .= "/".$product->name if $product->name;
@@ -372,59 +356,34 @@ sub contents : Chained('buildChain') PathPart Args(1) {
}
elsif ($path =~ /\.rpm$/) {
my ($stdout1, $stderr1);
run3(['rpm', '--query', '--info', '--package', $path], \undef, \$stdout1, \$stderr1);
error($c, "RPM error: $stderr1") if $? != 0;
$res = $stdout1;
$res = `rpm --query --info --package '$path'`;
error($c, "RPM error: $?") if $? != 0;
$res .= "===\n";
my ($stdout2, $stderr2);
run3(['rpm', '--query', '--list', '--verbose', '--package', $path], \undef, \$stdout2, \$stderr2);
error($c, "RPM error: $stderr2") if $? != 0;
$res .= $stdout2;
$res .= `rpm --query --list --verbose --package '$path'`;
error($c, "RPM error: $?") if $? != 0;
}
elsif ($path =~ /\.deb$/) {
my ($stdout1, $stderr1);
run3(['dpkg-deb', '--info', $path], \undef, \$stdout1, \$stderr1);
error($c, "`dpkg-deb' error: $stderr1") if $? != 0;
$res = $stdout1;
$res = `dpkg-deb --info '$path'`;
error($c, "`dpkg-deb' error: $?") if $? != 0;
$res .= "===\n";
my ($stdout2, $stderr2);
run3(['dpkg-deb', '--contents', $path], \undef, \$stdout2, \$stderr2);
error($c, "`dpkg-deb' error: $stderr2") if $? != 0;
$res .= $stdout2;
$res .= `dpkg-deb --contents '$path'`;
error($c, "`dpkg-deb' error: $?") if $? != 0;
}
elsif ($path =~ /\.(tar(\.gz|\.bz2|\.xz|\.lzma)?|tgz)$/ ) {
my ($stdout, $stderr);
run3(['tar', 'tvfa', $path], \undef, \$stdout, \$stderr);
error($c, "`tar' error: $stderr") if $? != 0;
$res = $stdout;
$res = `tar tvfa '$path'`;
error($c, "`tar' error: $?") if $? != 0;
}
elsif ($path =~ /\.(zip|jar)$/ ) {
my ($stdout, $stderr);
run3(['unzip', '-v', $path], \undef, \$stdout, \$stderr);
error($c, "`unzip' error: $stderr") if $? != 0;
$res = $stdout;
$res = `unzip -v '$path'`;
error($c, "`unzip' error: $?") if $? != 0;
}
elsif ($path =~ /\.iso$/ ) {
# Run first isoinfo command
my ($stdout1, $stderr1);
run3(['isoinfo', '-d', '-i', $path], \undef, \$stdout1, \$stderr1);
error($c, "`isoinfo' error: $stderr1") if $? != 0;
$res = $stdout1;
# Run second isoinfo command
my ($stdout2, $stderr2);
run3(['isoinfo', '-l', '-R', '-i', $path], \undef, \$stdout2, \$stderr2);
error($c, "`isoinfo' error: $stderr2") if $? != 0;
$res .= $stdout2;
$res = `isoinfo -d -i '$path' && isoinfo -l -R -i '$path'`;
error($c, "`isoinfo' error: $?") if $? != 0;
}
else {
@@ -456,7 +415,7 @@ sub getDependencyGraph {
};
$$done{$path} = $node;
my @refs;
foreach my $ref ($MACHINE_LOCAL_STORE->queryReferences($path)) {
foreach my $ref (queryReferences($path)) {
next if $ref eq $path;
next unless $runtime || $ref =~ /\.drv$/;
getDependencyGraph($self, $c, $runtime, $done, $ref);
@@ -464,7 +423,7 @@ sub getDependencyGraph {
}
# Show in reverse topological order to flatten the graph.
# Should probably do a proper BFS.
my @sorted = reverse $MACHINE_LOCAL_STORE->topoSortPaths(@refs);
my @sorted = reverse topoSortPaths(@refs);
$node->{refs} = [map { $$done{$_} } @sorted];
}
@@ -477,7 +436,7 @@ sub build_deps : Chained('buildChain') PathPart('build-deps') {
my $build = $c->stash->{build};
my $drvPath = $build->drvpath;
error($c, "Derivation no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($drvPath);
error($c, "Derivation no longer available.") unless isValidPath $drvPath;
$c->stash->{buildTimeGraph} = getDependencyGraph($self, $c, 0, {}, $drvPath);
@@ -492,7 +451,7 @@ sub runtime_deps : Chained('buildChain') PathPart('runtime-deps') {
requireLocalStore($c);
error($c, "Build outputs no longer available.") unless all { $MACHINE_LOCAL_STORE->isValidPath($_) } @outPaths;
error($c, "Build outputs no longer available.") unless all { isValidPath($_) } @outPaths;
my $done = {};
$c->stash->{runtimeGraph} = [ map { getDependencyGraph($self, $c, 1, $done, $_) } @outPaths ];
@@ -512,7 +471,7 @@ sub nix : Chained('buildChain') PathPart('nix') CaptureArgs(0) {
if (isLocalStore) {
foreach my $out ($build->buildoutputs) {
notFound($c, "Path " . $out->path . " is no longer available.")
unless $MACHINE_LOCAL_STORE->isValidPath($out->path);
unless isValidPath($out->path);
}
}


@@ -364,21 +364,6 @@ sub evals_GET {
);
}
sub errors :Chained('jobsetChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }
sub errors_GET {
my ($self, $c) = @_;
$c->stash->{template} = 'eval-error.tt';
my $jobsetName = $c->stash->{params}->{name};
$c->stash->{jobset} = $c->stash->{project}->jobsets->find(
{ name => $jobsetName },
{ '+columns' => { 'errormsg' => 'errormsg' } }
);
$self->status_ok($c, entity => $c->stash->{jobset});
}
# Redirect to the latest finished evaluation of this jobset.
sub latest_eval : Chained('jobsetChain') PathPart('latest-eval') {


@@ -76,9 +76,7 @@ sub view_GET {
$c->stash->{removed} = $diff->{removed};
$c->stash->{unfinished} = $diff->{unfinished};
$c->stash->{aborted} = $diff->{aborted};
$c->stash->{totalAborted} = $diff->{totalAborted};
$c->stash->{totalFailed} = $diff->{totalFailed};
$c->stash->{totalQueued} = $diff->{totalQueued};
$c->stash->{failed} = $diff->{failed};
$c->stash->{full} = ($c->req->params->{full} || "0") eq "1";
@@ -88,17 +86,6 @@ sub view_GET {
);
}
sub errors :Chained('evalChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }
sub errors_GET {
my ($self, $c) = @_;
$c->stash->{template} = 'eval-error.tt';
$c->stash->{eval} = $c->model('DB::JobsetEvals')->find($c->stash->{eval}->id, { prefetch => 'evaluationerror' });
$self->status_ok($c, entity => $c->stash->{eval});
}
sub create_jobset : Chained('evalChain') PathPart('create-jobset') Args(0) {
my ($self, $c) = @_;


@@ -9,21 +9,15 @@ use Hydra::Helper::CatalystUtils;
use Hydra::View::TT;
use Nix::Store;
use Nix::Config;
use Number::Bytes::Human qw(format_bytes);
use Encode;
use File::Basename;
use JSON::MaybeXS;
use HTML::Entities;
use IPC::Run3;
use List::Util qw[min max];
use List::SomeUtils qw{any};
use Net::Prometheus;
use Types::Standard qw/StrMatch/;
use WWW::Form::UrlEncoded::PP qw();
use constant NARINFO_REGEX => qr{^([a-z0-9]{32})\.narinfo$};
# e.g.: https://hydra.example.com/realisations/sha256:a62128132508a3a32eef651d6467695944763602f226ac630543e947d9feb140!out.doi
use constant REALISATIONS_REGEX => qr{^(sha256:[a-z0-9]{64}![a-z]+)\.doi$};
# Put this controller at top-level.
__PACKAGE__->config->{namespace} = '';
@@ -38,7 +32,6 @@ sub noLoginNeeded {
return $whitelisted ||
$c->request->path eq "api/push-github" ||
$c->request->path eq "api/push-gitea" ||
$c->request->path eq "google-login" ||
$c->request->path eq "github-redirect" ||
$c->request->path eq "github-login" ||
@@ -54,13 +47,11 @@ sub begin :Private {
$c->stash->{curUri} = $c->request->uri;
$c->stash->{version} = $ENV{"HYDRA_RELEASE"} || "<devel>";
$c->stash->{nixVersion} = $ENV{"NIX_RELEASE"} || "<devel>";
$c->stash->{nixEvalJobsVersion} = $ENV{"NIX_EVAL_JOBS_RELEASE"} || "<devel>";
$c->stash->{curTime} = time;
$c->stash->{logo} = defined $c->config->{hydra_logo} ? "/logo" : "";
$c->stash->{tracker} = defined $c->config->{tracker} ? $c->config->{tracker} : "";
$c->stash->{flashMsg} = $c->flash->{flashMsg};
$c->stash->{successMsg} = $c->flash->{successMsg};
$c->stash->{localStore} = isLocalStore;
$c->stash->{isPrivateHydra} = $c->config->{private} // "0" ne "0";
@@ -86,7 +77,7 @@ sub begin :Private {
$_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins};
# XSRF protection: require POST requests to have the same origin.
if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") {
if ($c->req->method eq "POST" && $c->req->path ne "api/push-github") {
my $referer = $c->req->header('Referer');
$referer //= $c->req->header('Origin');
my $base = $c->req->base;
@@ -166,7 +157,7 @@ sub status_GET {
{ "buildsteps.busy" => { '!=', 0 } },
{ order_by => ["globalpriority DESC", "id"],
join => "buildsteps",
columns => [@buildListColumns, 'buildsteps.drvpath', 'buildsteps.type']
columns => [@buildListColumns]
})]
);
}
@@ -178,14 +169,8 @@ sub queue_runner_status_GET {
my ($self, $c) = @_;
#my $status = from_json($c->model('DB::SystemStatus')->find('queue-runner')->status);
my ($stdout, $stderr);
run3(['hydra-queue-runner', '--status'], \undef, \$stdout, \$stderr);
my $status;
if ($? != 0) {
$status = { status => "unknown" };
} else {
$status = decode_json($stdout);
}
my $status = decode_json(`hydra-queue-runner --status`);
if ($?) { $status->{status} = "unknown"; }
my $json = JSON->new->pretty()->canonical();
$c->stash->{template} = 'queue-runner-status.tt';
@@ -198,10 +183,8 @@ sub machines :Local Args(0) {
my ($self, $c) = @_;
my $machines = getMachines;
# Add an entry for localhost. This implicit addition is not needed with queue runner v2.
if (not $c->config->{'queue_runner_endpoint'}) {
$machines->{''} //= {};
}
# Add entry for localhost.
$machines->{''} //= {};
delete $machines->{'localhost'};
my $status = $c->model('DB::SystemStatus')->find("queue-runner");
@@ -209,11 +192,9 @@ sub machines :Local Args(0) {
my $ms = decode_json($status->status)->{"machines"};
foreach my $name (keys %{$ms}) {
$name = "" if $name eq "localhost";
my $outName = $name;
$outName = "" if $name eq "ssh://localhost";
$machines->{$outName} //= {disabled => 1};
$machines->{$outName}->{nrStepsDone} = $ms->{$name}->{nrStepsDone};
$machines->{$outName}->{avgStepBuildTime} = $ms->{$name}->{avgStepBuildTime} // 0;
$machines->{$name} //= {disabled => 1};
$machines->{$name}->{nrStepsDone} = $ms->{$name}->{nrStepsDone};
$machines->{$name}->{avgStepBuildTime} = $ms->{$name}->{avgStepBuildTime} // 0;
}
}
@@ -226,19 +207,6 @@ sub machines :Local Args(0) {
"where busy != 0 order by machine, stepnr",
{ Slice => {} });
$c->stash->{template} = 'machine-status.tt';
$c->stash->{human_bytes} = sub {
my ($bytes) = @_;
return format_bytes($bytes, si => 1);
};
$c->stash->{pretty_load} = sub {
my ($load) = @_;
return sprintf('%.2f', $load);
};
$c->stash->{pretty_percent} = sub {
my ($percent) = @_;
my $ret = sprintf('%.2f', $percent);
return ('&nbsp;' x (6 - length($ret))) . encode_entities($ret);
};
$self->status_ok($c, entity => $c->stash->{machines});
}
@@ -358,7 +326,7 @@ sub nar :Local :Args(1) {
else {
$path = $Nix::Config::storeDir . "/$path";
gone($c, "Path " . $path . " is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($path);
gone($c, "Path " . $path . " is no longer available.") unless isValidPath($path);
$c->stash->{current_view} = 'NixNAR';
$c->stash->{storePath} = $path;
@@ -387,33 +355,6 @@ sub nix_cache_info :Path('nix-cache-info') :Args(0) {
}
sub realisations :Path('realisations') :Args(StrMatch[REALISATIONS_REGEX]) {
my ($self, $c, $realisation) = @_;
if (!isLocalStore) {
notFound($c, "There is no binary cache here.");
}
else {
my ($rawDrvOutput) = $realisation =~ REALISATIONS_REGEX;
my $rawRealisation = $MACHINE_LOCAL_STORE->queryRawRealisation($rawDrvOutput);
if (!$rawRealisation) {
$c->response->status(404);
$c->response->content_type('text/plain');
$c->stash->{plain}->{data} = "does not exist\n";
$c->forward('Hydra::View::Plain');
setCacheHeaders($c, 60 * 60);
return;
}
$c->response->content_type('text/plain');
$c->stash->{plain}->{data} = $rawRealisation;
$c->forward('Hydra::View::Plain');
}
}
sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
my ($self, $c, $narinfo) = @_;
@@ -425,7 +366,7 @@ sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
my ($hash) = $narinfo =~ NARINFO_REGEX;
die("Hash length was not 32") if length($hash) != 32;
my $path = $MACHINE_LOCAL_STORE->queryPathFromHashPart($hash);
my $path = queryPathFromHashPart($hash);
if (!$path) {
$c->response->status(404);
@@ -583,7 +524,7 @@ sub log :Local :Args(1) {
my $logPrefix = $c->config->{log_prefix};
if (defined $logPrefix) {
$c->res->redirect($logPrefix . "log/" . WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath)));
$c->res->redirect($logPrefix . "log/" . basename($drvPath));
} else {
notFound($c, "The build log of $drvPath is not available.");
}


@@ -463,7 +463,7 @@ sub my_jobs_tab :Chained('dashboard_base') :PathPart('my-jobs-tab') :Args(0) {
, "jobset.enabled" => 1
},
{ order_by => ["project", "jobset", "job"]
, join => {"jobset" => "project"}
, join => ["project", "jobset"]
})];
}


@@ -32,26 +32,12 @@ sub buildDiff {
removed => [],
unfinished => [],
aborted => [],
# These summary counters cut across the categories to determine whether
# actions such as "Restart all failed" or "Bump queue" are available.
totalAborted => 0,
totalFailed => 0,
totalQueued => 0,
failed => [],
};
my $n = 0;
foreach my $build (@{$builds}) {
my $aborted = $build->finished != 0 && (
# aborted
$build->buildstatus == 3
# cancelled
|| $build->buildstatus == 4
# timeout
|| $build->buildstatus == 7
# log limit exceeded
|| $build->buildstatus == 10
);
my $aborted = $build->finished != 0 && ($build->buildstatus == 3 || $build->buildstatus == 4);
my $d;
my $found = 0;
while ($n < scalar(@{$builds2})) {
@@ -85,19 +71,12 @@ sub buildDiff {
} else {
push @{$ret->{new}}, $build if !$found;
}
if ($build->finished != 0 && $build->buildstatus != 0) {
if ($aborted) {
++$ret->{totalAborted};
} else {
++$ret->{totalFailed};
}
} elsif ($build->finished == 0) {
++$ret->{totalQueued};
if (defined $build->buildstatus && $build->buildstatus != 0) {
push @{$ret->{failed}}, $build;
}
}
return $ret;
}
1;
1;
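The widened `$aborted` test and the new summary counters classify builds by Hydra's numeric `buildstatus` codes, which the inline comments above spell out. The same classification as a small C++ sketch:

```cpp
// Build-status tally mirroring buildDiff's counters; the enum values
// come from the status codes commented in the Perl code above.
#include <cstdio>

enum BuildStatus {
    bsSuccess = 0,
    bsAborted = 3,
    bsCancelled = 4,
    bsTimedOut = 7,
    bsLogLimitExceeded = 10,
};

struct Totals { unsigned aborted = 0, failed = 0, queued = 0; };

static void tally(Totals & t, bool finished, int status) {
    if (!finished) { ++t.queued; return; }
    if (status == bsSuccess) return;
    bool aborted = status == bsAborted || status == bsCancelled
        || status == bsTimedOut || status == bsLogLimitExceeded;
    aborted ? ++t.aborted : ++t.failed;
}

int main() {
    Totals t;
    tally(t, /*finished=*/true, bsCancelled);   // counts as aborted
    tally(t, /*finished=*/false, 0);            // still queued
    std::printf("aborted=%u failed=%u queued=%u\n", t.aborted, t.failed, t.queued);
}
```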


@@ -15,7 +15,6 @@ our @EXPORT = qw(
forceLogin requireUser requireProjectOwner requireRestartPrivileges requireAdmin requirePost isAdmin isProjectOwner
requireBumpPrivileges
requireCancelBuildPrivileges
requireEvalJobsetPrivileges
trim
getLatestFinishedEval getFirstEval
paramToList
@@ -187,27 +186,6 @@ sub isProjectOwner {
defined $c->model('DB::ProjectMembers')->find({ project => $project, userName => $c->user->username }));
}
sub hasEvalJobsetRole {
my ($c) = @_;
return $c->user_exists && $c->check_user_roles("eval-jobset");
}
sub mayEvalJobset {
my ($c, $project) = @_;
return
$c->user_exists &&
(isAdmin($c) ||
hasEvalJobsetRole($c) ||
isProjectOwner($c, $project));
}
sub requireEvalJobsetPrivileges {
my ($c, $project) = @_;
requireUser($c);
accessDenied($c, "Only the project members, administrators, and accounts with eval-jobset privileges can perform this operation.")
unless mayEvalJobset($c, $project);
}
sub hasCancelBuildRole {
my ($c) = @_;
return $c->user_exists && $c->check_user_roles('cancel-build');
@@ -294,7 +272,7 @@ sub requireAdmin {
sub requirePost {
my ($c) = @_;
error($c, "Request must be POSTed.", 405) if $c->request->method ne "POST";
error($c, "Request must be POSTed.") if $c->request->method ne "POST";
}


@@ -12,14 +12,10 @@ use Nix::Store;
use Encode;
use Sys::Hostname::Long;
use IPC::Run;
use IPC::Run3;
use LWP::UserAgent;
use JSON::MaybeXS;
use UUID4::Tiny qw(is_uuid4_string);
our @ISA = qw(Exporter);
our @EXPORT = qw(
addToStore
cancelBuilds
constructRunCommandLogPath
findLog
@@ -40,16 +36,12 @@ our @EXPORT = qw(
jobsetOverview
jobsetOverview_
pathIsInsidePrefix
readIntoSocket
readNixFile
registerRoot
restartBuilds
runCommand
$MACHINE_LOCAL_STORE
run
);
our $MACHINE_LOCAL_STORE = Nix::Store->new();
sub getHydraHome {
my $dir = $ENV{"HYDRA_HOME"} or die "The HYDRA_HOME directory does not exist!\n";
@@ -179,9 +171,6 @@ sub getDrvLogPath {
for ($fn . $bucketed, $fn . $bucketed . ".bz2") {
return $_ if -f $_;
}
for ($fn . $bucketed, $fn . $bucketed . ".zst") {
return $_ if -f $_;
}
return undef;
}
@@ -198,10 +187,6 @@ sub findLog {
return undef if scalar @outPaths == 0;
# Filter out any NULLs. Content-addressed derivations
# that haven't built yet or failed to build may have a NULL outPath.
@outPaths = grep {defined} @outPaths;
my @steps = $c->model('DB::BuildSteps')->search(
{ path => { -in => [@outPaths] } },
{ select => ["drvpath"]
@@ -301,7 +286,8 @@ sub getEvals {
my @evals = $evals_result_set->search(
{ hasnewbuilds => 1 },
{ order_by => "$me.id DESC", rows => $rows, offset => $offset });
{ order_by => "$me.id DESC", rows => $rows, offset => $offset
, prefetch => { evaluationerror => [ ] } });
my @res = ();
my $cache = {};
@@ -344,68 +330,37 @@ sub getEvals {
sub getMachines {
my %machines = ();
my $config = getHydraConfig();
if ($config->{'queue_runner_endpoint'}) {
my $ua = LWP::UserAgent->new();
my $resp = $ua->get($config->{'queue_runner_endpoint'} . "/status/machines");
if (not $resp->is_success) {
print STDERR "Unable to ask queue runner for machines\n";
return \%machines;
}
my @machinesFiles = split /:/, ($ENV{"NIX_REMOTE_SYSTEMS"} || "/etc/nix/machines");
my $data = decode_json($resp->decoded_content) or return \%machines;
my $machinesData = $data->{machines};
for my $machinesFile (@machinesFiles) {
next unless -e $machinesFile;
open(my $conf, "<", $machinesFile) or die;
while (my $line = <$conf>) {
chomp($line);
$line =~ s/\#.*$//g;
next if $line =~ /^\s*$/;
my @tokens = split /\s+/, $line;
foreach my $machineName (keys %$machinesData) {
my $machine = %$machinesData{$machineName};
$machines{$machineName} =
{ systemTypes => $machine->{systems}
, maxJobs => $machine->{maxJobs}
, speedFactor => $machine->{speedFactor}
, supportedFeatures => [ @{$machine->{supportedFeatures}}, @{$machine->{mandatoryFeatures}} ]
, mandatoryFeatures => [ @{$machine->{mandatoryFeatures}} ]
# New fields for the machine status
, primarySystemType => $machine->{systems}[0]
, hasCapacity => $machine->{hasCapacity}
, hasDynamicCapacity => $machine->{hasDynamicCapacity}
, hasStaticCapacity => $machine->{hasStaticCapacity}
, score => $machine->{score}
, stats => $machine->{stats}
, memTotal => $machine->{totalMem}
if (!defined($tokens[5]) || $tokens[5] eq "-") {
$tokens[5] = "";
}
my @supportedFeatures = split(/,/, $tokens[5] || "");
if (!defined($tokens[6]) || $tokens[6] eq "-") {
$tokens[6] = "";
}
my @mandatoryFeatures = split(/,/, $tokens[6] || "");
$machines{$tokens[0]} =
{ systemTypes => [ split(/,/, $tokens[1]) ]
, sshKeys => $tokens[2]
, maxJobs => int($tokens[3])
, speedFactor => 1.0 * (defined $tokens[4] ? int($tokens[4]) : 1)
, supportedFeatures => [ @supportedFeatures, @mandatoryFeatures ]
, mandatoryFeatures => [ @mandatoryFeatures ]
};
}
} else {
my @machinesFiles = split /:/, ($ENV{"NIX_REMOTE_SYSTEMS"} || "/etc/nix/machines");
for my $machinesFile (@machinesFiles) {
next unless -e $machinesFile;
open(my $conf, "<", $machinesFile) or die;
while (my $line = <$conf>) {
chomp($line);
$line =~ s/\#.*$//g;
next if $line =~ /^\s*$/;
my @tokens = split /\s+/, $line;
if (!defined($tokens[5]) || $tokens[5] eq "-") {
$tokens[5] = "";
}
my @supportedFeatures = split(/,/, $tokens[5] || "");
if (!defined($tokens[6]) || $tokens[6] eq "-") {
$tokens[6] = "";
}
my @mandatoryFeatures = split(/,/, $tokens[6] || "");
$machines{$tokens[0]} =
{ systemTypes => [ split(/,/, $tokens[1]) ]
, maxJobs => int($tokens[3])
, speedFactor => 1.0 * (defined $tokens[4] ? int($tokens[4]) : 1)
, supportedFeatures => [ @supportedFeatures, @mandatoryFeatures ]
, mandatoryFeatures => [ @mandatoryFeatures ]
};
}
close $conf;
}
close $conf;
}
return \%machines;
@@ -452,21 +407,11 @@ sub pathIsInsidePrefix {
return $cur;
}
sub readIntoSocket{
my (%args) = @_;
my $sock;
eval {
open($sock, "-|", @{$args{cmd}}) or die q(failed to open socket from command:\n $x);
};
return $sock;
}
sub runCommand {
sub run {
my (%args) = @_;
my $res = { stdout => "", stderr => "" };
my $stdin = "";
@@ -506,7 +451,7 @@ sub runCommand {
sub grab {
my (%args) = @_;
my $res = runCommand(%args, grabStderr => 0);
my $res = run(%args, grabStderr => 0);
if ($res->{status}) {
my $msgloc = "(in an indeterminate location)";
if (defined $args{dir}) {
@@ -549,7 +494,7 @@ sub restartBuilds {
$builds = $builds->search({ finished => 1 });
foreach my $build ($builds->search({}, { columns => ["drvpath"] })) {
next if !$MACHINE_LOCAL_STORE->isValidPath($build->drvpath);
next if !isValidPath($build->drvpath);
registerRoot $build->drvpath;
}
@@ -616,14 +561,4 @@ sub constructRunCommandLogPath {
return "$hydra_path/runcommand-logs/$bucket/$uuid";
}
sub addToStore {
my ($path) = @_;
my ($stdout, $stderr);
run3(['nix-store', '--add', $path], \undef, \$stdout, \$stderr);
die "cannot add path $path to the Nix store: $stderr\n" if $? != 0;
return trim($stdout);
}
1;
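For context on the getMachines parsing above: each non-comment line of /etc/nix/machines carries up to seven whitespace-separated fields — name, system types, SSH key, max jobs, speed factor, supported features, mandatory features — with "-" standing for an empty field. A minimal sketch, hostname and key path hypothetical:

builder1.example.org  x86_64-linux,i686-linux  /etc/nix/key_rsa  8  2  kvm,big-parallel  benchmark

This line would yield systemTypes => ["x86_64-linux", "i686-linux"], maxJobs => 8, speedFactor => 2.0, supportedFeatures => ["kvm", "big-parallel", "benchmark"] (the code appends mandatory features to the supported list), and mandatoryFeatures => ["benchmark"].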


@@ -7,6 +7,7 @@ use Digest::SHA qw(sha256_hex);
use File::Path;
use Hydra::Helper::Exec;
use Hydra::Helper::Nix;
use Nix::Store;
sub supportedInputTypes {
my ($self, $inputTypes) = @_;
@@ -37,9 +38,9 @@ sub fetchInput {
(my $cachedInput) = $self->{db}->resultset('CachedBazaarInputs')->search(
{uri => $uri, revision => $revision});
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;
addTempRoot($cachedInput->storepath) if defined $cachedInput;
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
} else {
@@ -57,7 +58,7 @@ sub fetchInput {
($sha256, $storePath) = split ' ', $stdout;
# FIXME: time window between nix-prefetch-bzr and addTempRoot.
$MACHINE_LOCAL_STORE->addTempRoot($storePath);
addTempRoot($storePath);
$self->{db}->txn_do(sub {
$self->{db}->resultset('CachedBazaarInputs')->create(


@@ -7,7 +7,6 @@ use HTTP::Request;
use LWP::UserAgent;
use JSON::MaybeXS;
use Hydra::Helper::CatalystUtils;
use Hydra::Helper::Nix;
use File::Temp;
use POSIX qw(strftime);
@@ -44,11 +43,14 @@ sub fetchInput {
my $ua = LWP::UserAgent->new();
_iterate("https://api.bitbucket.com/2.0/repositories/$owner/$repo/pullrequests?state=OPEN", $auth, \%pulls, $ua);
my $tempdir = File::Temp->newdir("bitbucket-pulls" . "XXXXX", TMPDIR => 1);
my $filename = "$tempdir/bitbucket-pulls-sorted.json";
my $filename = "$tempdir/bitbucket-pulls.json";
open(my $fh, ">", $filename) or die "Cannot open $filename for writing: $!";
print $fh JSON::MaybeXS->new(canonical => 1, pretty => 1)->encode(\%pulls);
print $fh encode_json \%pulls;
close $fh;
my $storePath = addToStore($filename);
system("jq -S . < $filename > $tempdir/bitbucket-pulls-sorted.json");
my $storePath = trim(`nix-store --add "$tempdir/bitbucket-pulls-sorted.json"`
or die "cannot copy path $filename to the Nix store.\n");
chomp $storePath;
my $timestamp = time;
return { storePath => $storePath, revision => strftime "%Y%m%d%H%M%S", gmtime($timestamp) };
}


@@ -9,24 +9,11 @@ use Hydra::Helper::CatalystUtils;
sub stepFinished {
my ($self, $step, $logPath) = @_;
my $doCompress = $self->{config}->{'compress_build_logs'} // '1';
my $silent = $self->{config}->{'compress_build_logs_silent'} // '0';
my $compression = $self->{config}->{'compress_build_logs_compression'} // 'bzip2';
my $doCompress = $self->{config}->{'compress_build_logs'} // "1";
if (not -e $logPath or $doCompress ne "1") {
return;
}
if ($silent ne '1') {
print STDERR "compressing '$logPath' with $compression...\n";
}
if ($compression eq 'bzip2') {
system('bzip2', '--force', $logPath);
} elsif ($compression eq 'zstd') {
system('zstd', '--rm', '--quiet', '-T0', $logPath);
} else {
print STDERR "unknown compression type '$compression'\n";
if ($doCompress eq "1" && -e $logPath) {
print STDERR "compressing $logPath...\n";
system("bzip2", "--force", $logPath);
}
}
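The keys read by stepFinished above come from hydra.conf; a minimal sketch of the newer three-option form (values illustrative — bzip2 is the default compression, zstd the alternative):

compress_build_logs = 1
compress_build_logs_silent = 0
compress_build_logs_compression = zstd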


@@ -7,7 +7,7 @@ use Digest::SHA qw(sha256_hex);
use File::Path;
use Hydra::Helper::Exec;
use Hydra::Helper::Nix;
use IPC::Run3;
use Nix::Store;
sub supportedInputTypes {
my ($self, $inputTypes) = @_;
@@ -32,7 +32,7 @@ sub fetchInput {
my $stdout = ""; my $stderr = ""; my $res;
if (! -d $clonePath) {
# Clone the repository.
$res = runCommand(timeout => 600,
$res = run(timeout => 600,
cmd => ["darcs", "get", "--lazy", $uri, $clonePath],
dir => $ENV{"TMPDIR"});
die "Error getting darcs repo at `$uri':\n$stderr" if $res->{status};
@@ -58,7 +58,7 @@ sub fetchInput {
{uri => $uri, revision => $revision},
{rows => 1});
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
$revision = $cachedInput->revision;
@@ -71,15 +71,12 @@ sub fetchInput {
(system "darcs", "get", "--lazy", $clonePath, "$tmpDir/export", "--quiet",
"--to-match", "hash $revision") == 0
or die "darcs export failed";
my ($stdout, $stderr);
run3(['darcs', 'changes', '--count', '--repodir', "$tmpDir/export"], \undef, \$stdout, \$stderr);
die "darcs changes --count failed: $stderr\n" if $? != 0;
$revCount = $stdout;
chomp $revCount;
$revCount = `darcs changes --count --repodir $tmpDir/export`; chomp $revCount;
die "darcs changes --count failed" if $? != 0;
system "rm", "-rf", "$tmpDir/export/_darcs";
$storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/export", 1, "sha256");
$sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath);
$storePath = addToStore("$tmpDir/export", 1, "sha256");
$sha256 = queryPathHash($storePath);
$sha256 =~ s/sha256://;
$self->{db}->txn_do(sub {


@@ -137,8 +137,8 @@ sub fetchInput {
my $res;
if (! -d $clonePath) {
# Clone everything and fetch the branch.
$res = runCommand(cmd => ["git", "init", $clonePath]);
$res = runCommand(cmd => ["git", "remote", "add", "origin", "--", $uri], dir => $clonePath) unless $res->{status};
$res = run(cmd => ["git", "init", $clonePath]);
$res = run(cmd => ["git", "remote", "add", "origin", "--", $uri], dir => $clonePath) unless $res->{status};
die "error creating git repo in `$clonePath':\n$res->{stderr}" if $res->{status};
}
@@ -146,9 +146,9 @@ sub fetchInput {
# the remote branch for whatever the repository state is. This command mirrors
# only one branch of the remote repository.
my $localBranch = _isHash($branch) ? "_hydra_tmp" : $branch;
$res = runCommand(cmd => ["git", "fetch", "-fu", "origin", "+$branch:$localBranch"], dir => $clonePath,
$res = run(cmd => ["git", "fetch", "-fu", "origin", "+$branch:$localBranch"], dir => $clonePath,
timeout => $cfg->{timeout});
$res = runCommand(cmd => ["git", "fetch", "-fu", "origin"], dir => $clonePath, timeout => $cfg->{timeout}) if $res->{status};
$res = run(cmd => ["git", "fetch", "-fu", "origin"], dir => $clonePath, timeout => $cfg->{timeout}) if $res->{status};
die "error fetching latest change from git repo at `$uri':\n$res->{stderr}" if $res->{status};
# If deepClone is defined, then we look at the content of the repository
@@ -156,16 +156,16 @@ sub fetchInput {
if (defined $deepClone) {
# Is the target branch a topgit branch?
$res = runCommand(cmd => ["git", "ls-tree", "-r", "$branch", ".topgit"], dir => $clonePath);
$res = run(cmd => ["git", "ls-tree", "-r", "$branch", ".topgit"], dir => $clonePath);
if ($res->{stdout} ne "") {
# Checkout the branch to look at its content.
$res = runCommand(cmd => ["git", "checkout", "--force", "$branch"], dir => $clonePath);
$res = run(cmd => ["git", "checkout", "--force", "$branch"], dir => $clonePath);
die "error checking out Git branch '$branch' at `$uri':\n$res->{stderr}" if $res->{status};
# This is a TopGit branch. Fetch all the topic branches so
# that builders can run "tg patch" and similar.
$res = runCommand(cmd => ["tg", "remote", "--populate", "origin"], dir => $clonePath, timeout => $cfg->{timeout});
$res = run(cmd => ["tg", "remote", "--populate", "origin"], dir => $clonePath, timeout => $cfg->{timeout});
print STDERR "warning: `tg remote --populate origin' failed:\n$res->{stderr}" if $res->{status};
}
}
@@ -186,9 +186,9 @@ sub fetchInput {
{uri => $uri, branch => $branch, revision => $revision, isdeepclone => defined($deepClone) ? 1 : 0},
{rows => 1});
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;
addTempRoot($cachedInput->storepath) if defined $cachedInput;
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
$revision = $cachedInput->revision;
@@ -217,7 +217,7 @@ sub fetchInput {
($sha256, $storePath) = split ' ', grab(cmd => ["nix-prefetch-git", $clonePath, $revision], chomp => 1);
# FIXME: time window between nix-prefetch-git and addTempRoot.
$MACHINE_LOCAL_STORE->addTempRoot($storePath);
addTempRoot($storePath);
$self->{db}->txn_do(sub {
$self->{db}->resultset('CachedGitInputs')->update_or_create(


@@ -88,6 +88,10 @@ sub buildQueued {
common(@_, [], 0);
}
sub buildStarted {
common(@_, [], 1);
}
sub buildFinished {
common(@_, 2);
}


@@ -7,7 +7,6 @@ use HTTP::Request;
use LWP::UserAgent;
use JSON::MaybeXS;
use Hydra::Helper::CatalystUtils;
use Hydra::Helper::Nix;
use File::Temp;
use POSIX qw(strftime);
@@ -59,7 +58,9 @@ sub fetchInput {
print $fh JSON->new->utf8->canonical->encode(\%pulls);
close $fh;
my $storePath = addToStore($filename);
my $storePath = trim(`nix-store --add "$filename"`
or die "cannot copy path $filename to the Nix store.\n");
chomp $storePath;
my $timestamp = time;
return { storePath => $storePath, revision => strftime "%Y%m%d%H%M%S", gmtime($timestamp) };
}


@@ -7,7 +7,6 @@ use HTTP::Request;
use LWP::UserAgent;
use JSON::MaybeXS;
use Hydra::Helper::CatalystUtils;
use Hydra::Helper::Nix;
use File::Temp;
use POSIX qw(strftime);
@@ -18,8 +17,9 @@ tags) from GitHub following a certain naming scheme
=head1 DESCRIPTION
This plugin reads the list of branches or tags using GitHub's REST API. This
list is stored in the nix-store and used as an input to declarative jobsets.
This plugin reads the list of branches or tags using GitHub's REST API. The name
of the reference must start with a particular prefix. This list is stored in the
nix-store and used as an input to declarative jobsets.
=head1 CONFIGURATION
@@ -33,7 +33,7 @@ The declarative project C<spec.json> file must contain an input such as
"pulls": {
"type": "github_refs",
"value": "[owner] [repo] [type] - [prefix]",
"value": "[owner] [repo] heads|tags - [prefix]",
"emailresponsible": false
}
@@ -41,11 +41,12 @@ In the above snippet, C<[owner]> is the repository owner and C<[repo]> is the
repository name. Also note the literal C<->, which is placed there for future
use.
C<[type]> is the type of ref to list. Typical values are "heads", "tags", and
"pull". "." will include all types.
C<heads|tags> denotes that exactly one of the two is allowed; that is, the third
position must hold either the C<heads> or the C<tags> keyword. With C<heads> the
plugin fetches all branches, while with C<tags> it fetches the tags.
C<prefix> denotes the prefix the reference name must start with, in order to be
included. "." will include all references.
included.
For example, C<"value": "nixos hydra heads - release/"> refers to
the L<https://github.com/nixos/hydra> repository, and will fetch all branches that
@@ -100,6 +101,8 @@ sub fetchInput {
return undef if $input_type ne "github_refs";
my ($owner, $repo, $type, $fut, $prefix) = split ' ', $value;
die "type field is neither 'heads' nor 'tags', but '$type'"
unless $type eq 'heads' or $type eq 'tags';
my $auth = $self->{config}->{github_authorization}->{$owner};
my $githubEndpoint = $self->{config}->{github_endpoint} // "https://api.github.com";
@@ -107,11 +110,14 @@ sub fetchInput {
my $ua = LWP::UserAgent->new();
_iterate("$githubEndpoint/repos/$owner/$repo/git/matching-refs/$type/$prefix?per_page=100", $auth, \%refs, $ua);
my $tempdir = File::Temp->newdir("github-refs" . "XXXXX", TMPDIR => 1);
my $filename = "$tempdir/github-refs-sorted.json";
my $filename = "$tempdir/github-refs.json";
open(my $fh, ">", $filename) or die "Cannot open $filename for writing: $!";
print $fh JSON::MaybeXS->new(canonical => 1, pretty => 1)->encode(\%refs);
print $fh encode_json \%refs;
close $fh;
my $storePath = addToStore($filename);
system("jq -S . < $filename > $tempdir/github-refs-sorted.json");
my $storePath = trim(qx{nix-store --add "$tempdir/github-refs-sorted.json"}
or die "cannot copy path $filename to the Nix store.\n");
chomp $storePath;
my $timestamp = time;
return { storePath => $storePath, revision => strftime "%Y%m%d%H%M%S", gmtime($timestamp) };
}
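Putting the documented pieces together, a hedged example of a declarative spec.json input in the newer, generalized form (owner, repo, and prefix illustrative):

"refs": {
    "type": "github_refs",
    "value": "nixos hydra heads - release/",
    "emailresponsible": false
}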


@@ -21,7 +21,6 @@ use HTTP::Request;
use LWP::UserAgent;
use JSON::MaybeXS;
use Hydra::Helper::CatalystUtils;
use Hydra::Helper::Nix;
use File::Temp;
use POSIX qw(strftime);
@@ -82,11 +81,14 @@ sub fetchInput {
_iterate($url, $baseUrl, \%pulls, $ua, $target_repo_url);
my $tempdir = File::Temp->newdir("gitlab-pulls" . "XXXXX", TMPDIR => 1);
my $filename = "$tempdir/gitlab-pulls-sorted.json";
my $filename = "$tempdir/gitlab-pulls.json";
open(my $fh, ">", $filename) or die "Cannot open $filename for writing: $!";
print $fh JSON::MaybeXS->new(canonical => 1, pretty => 1)->encode(\%pulls);
print $fh encode_json \%pulls;
close $fh;
my $storePath = addToStore($filename);
system("jq -S . < $filename > $tempdir/gitlab-pulls-sorted.json");
my $storePath = trim(`nix-store --add "$tempdir/gitlab-pulls-sorted.json"`
or die "cannot copy path $filename to the Nix store.\n");
chomp $storePath;
my $timestamp = time;
return { storePath => $storePath, revision => strftime "%Y%m%d%H%M%S", gmtime($timestamp) };
}


@@ -7,6 +7,7 @@ use Digest::SHA qw(sha256_hex);
use File::Path;
use Hydra::Helper::Nix;
use Hydra::Helper::Exec;
use Nix::Store;
use Fcntl qw(:flock);
sub supportedInputTypes {
@@ -67,9 +68,9 @@ sub fetchInput {
(my $cachedInput) = $self->{db}->resultset('CachedHgInputs')->search(
{uri => $uri, branch => $branch, revision => $revision});
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;
addTempRoot($cachedInput->storepath) if defined $cachedInput;
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
} else {
@@ -84,7 +85,7 @@ sub fetchInput {
($sha256, $storePath) = split ' ', $stdout;
# FIXME: time window between nix-prefetch-hg and addTempRoot.
$MACHINE_LOCAL_STORE->addTempRoot($storePath);
addTempRoot($storePath);
$self->{db}->txn_do(sub {
$self->{db}->resultset('CachedHgInputs')->update_or_create(


@@ -5,7 +5,7 @@ use warnings;
use parent 'Hydra::Plugin';
use POSIX qw(strftime);
use Hydra::Helper::Nix;
use IPC::Run3;
use Nix::Store;
sub supportedInputTypes {
my ($self, $inputTypes) = @_;
@@ -30,7 +30,7 @@ sub fetchInput {
{srcpath => $uri, lastseen => {">", $timestamp - $timeout}},
{rows => 1, order_by => "lastseen DESC"});
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
$timestamp = $cachedInput->timestamp;
@@ -38,20 +38,15 @@ sub fetchInput {
print STDERR "copying input ", $name, " from $uri\n";
if ( $uri =~ /^\// ) {
$storePath = addToStore($uri);
$storePath = `nix-store --add "$uri"`
or die "cannot copy path $uri to the Nix store.\n";
} else {
# Run nix-prefetch-url with PRINT_PATH=1
my ($stdout, $stderr);
local $ENV{PRINT_PATH} = 1;
run3(['nix-prefetch-url', $uri], \undef, \$stdout, \$stderr);
die "cannot fetch $uri to the Nix store: $stderr\n" if $? != 0;
# Get the last line (which is the store path)
my @output_lines = split /\n/, $stdout;
$storePath = $output_lines[-1] if @output_lines;
$storePath = `PRINT_PATH=1 nix-prefetch-url "$uri" | tail -n 1`
or die "cannot fetch $uri to the Nix store.\n";
}
chomp $storePath;
$sha256 = ($MACHINE_LOCAL_STORE->queryPathInfo($storePath, 0))[1] or die;
$sha256 = (queryPathInfo($storePath, 0))[1] or die;
($cachedInput) = $self->{db}->resultset('CachedPathInputs')->search(
{srcpath => $uri, sha256hash => $sha256});
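For reference, nix-prefetch-url run with PRINT_PATH=1 prints the hash on one line and the store path on the next, which is why the code above keeps only the last line of stdout. A sketch, with URL, hash, and store path hypothetical:

$ PRINT_PATH=1 nix-prefetch-url https://example.org/src.tar.gz
0f6nr8z1lvyr6...
/nix/store/abc123...-src.tar.gz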


@@ -7,8 +7,6 @@ use File::Temp;
use File::Basename;
use Fcntl;
use IO::File;
use IPC::Run qw(run);
use IPC::Run3;
use Net::Amazon::S3;
use Net::Amazon::S3::Client;
use Digest::SHA;
@@ -16,7 +14,6 @@ use Nix::Config;
use Nix::Store;
use Hydra::Model::DB;
use Hydra::Helper::CatalystUtils;
use Hydra::Helper::Nix;
sub isEnabled {
my ($self) = @_;
@@ -29,11 +26,11 @@ my %compressors = ();
$compressors{"none"} = "";
if (defined($Nix::Config::bzip2)) {
$compressors{"bzip2"} = "$Nix::Config::bzip2",
$compressors{"bzip2"} = "| $Nix::Config::bzip2",
}
if (defined($Nix::Config::xz)) {
$compressors{"xz"} = "$Nix::Config::xz",
$compressors{"xz"} = "| $Nix::Config::xz",
}
my $lockfile = Hydra::Model::DB::getHydraPath . "/.hydra-s3backup.lock";
@@ -95,7 +92,7 @@ sub buildFinished {
my $hash = substr basename($path), 0, 32;
my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($path, 0);
my $system;
if (defined $deriver and $MACHINE_LOCAL_STORE->isValidPath($deriver)) {
if (defined $deriver and isValidPath($deriver)) {
$system = derivationFromPath($deriver)->{platform};
}
foreach my $reference (@{$refs}) {
@@ -113,16 +110,7 @@ sub buildFinished {
}
next unless @incomplete_buckets;
my $compressor = $compressors{$compression_type};
if ($compressor eq "") {
# No compression - use IPC::Run3 to redirect stdout to file
run3(["$Nix::Config::binDir/nix-store", "--dump", $path],
\undef, "$tempdir/nar", \undef) or die "nix-store --dump failed: $!";
} else {
# With compression - use IPC::Run to pipe nix-store output to compressor
my $dump_cmd = ["$Nix::Config::binDir/nix-store", "--dump", $path];
my $compress_cmd = [$compressor];
run($dump_cmd, '|', $compress_cmd, '>', "$tempdir/nar") or die "Pipeline failed: $?";
}
system("$Nix::Config::binDir/nix-store --dump $path $compressor > $tempdir/nar") == 0 or die;
my $digest = Digest::SHA->new(256);
$digest->addfile("$tempdir/nar");
my $file_hash = $digest->hexdigest;


@@ -7,6 +7,7 @@ use Digest::SHA qw(sha256_hex);
use Hydra::Helper::Exec;
use Hydra::Helper::Nix;
use IPC::Run;
use Nix::Store;
sub supportedInputTypes {
my ($self, $inputTypes) = @_;
@@ -44,9 +45,9 @@ sub fetchInput {
(my $cachedInput) = $self->{db}->resultset('CachedSubversionInputs')->search(
{uri => $uri, revision => $revision});
$MACHINE_LOCAL_STORE->addTempRoot($cachedInput->storepath) if defined $cachedInput;
addTempRoot($cachedInput->storepath) if defined $cachedInput;
if (defined $cachedInput && $MACHINE_LOCAL_STORE->isValidPath($cachedInput->storepath)) {
if (defined $cachedInput && isValidPath($cachedInput->storepath)) {
$storePath = $cachedInput->storepath;
$sha256 = $cachedInput->sha256hash;
} else {
@@ -61,16 +62,16 @@ sub fetchInput {
die "error checking out Subversion repo at `$uri':\n$stderr" if $res;
if ($type eq "svn-checkout") {
$storePath = $MACHINE_LOCAL_STORE->addToStore($wcPath, 1, "sha256");
$storePath = addToStore($wcPath, 1, "sha256");
} else {
# Hm, if the Nix Perl bindings supported filters in
# addToStore(), then we wouldn't need to make a copy here.
my $tmpDir = File::Temp->newdir("hydra-svn-export.XXXXXX", CLEANUP => 1, TMPDIR => 1) or die;
(system "svn", "export", $wcPath, "$tmpDir/source", "--quiet") == 0 or die "svn export failed";
$storePath = $MACHINE_LOCAL_STORE->addToStore("$tmpDir/source", 1, "sha256");
$storePath = addToStore("$tmpDir/source", 1, "sha256");
}
$sha256 = $MACHINE_LOCAL_STORE->queryPathHash($storePath); $sha256 =~ s/sha256://;
$sha256 = queryPathHash($storePath); $sha256 =~ s/sha256://;
$self->{db}->txn_do(sub {
$self->{db}->resultset('CachedSubversionInputs')->update_or_create(


@@ -49,7 +49,7 @@ __PACKAGE__->table("buildoutputs");
=head2 path
data_type: 'text'
is_nullable: 1
is_nullable: 0
=cut
@@ -59,7 +59,7 @@ __PACKAGE__->add_columns(
"name",
{ data_type => "text", is_nullable => 0 },
"path",
{ data_type => "text", is_nullable => 1 },
{ data_type => "text", is_nullable => 0 },
);
=head1 PRIMARY KEY
@@ -94,8 +94,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Jsabm3YTcI7YvCuNdKP5Ng
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gU+kZ6A0ISKpaXGRGve8mg
my %hint = (
columns => [


@@ -55,7 +55,7 @@ __PACKAGE__->table("buildstepoutputs");
=head2 path
data_type: 'text'
is_nullable: 1
is_nullable: 0
=cut
@@ -67,7 +67,7 @@ __PACKAGE__->add_columns(
"name",
{ data_type => "text", is_nullable => 0 },
"path",
{ data_type => "text", is_nullable => 1 },
{ data_type => "text", is_nullable => 0 },
);
=head1 PRIMARY KEY
@@ -119,8 +119,8 @@ __PACKAGE__->belongs_to(
);
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Bad70CRTt7zb2GGuRoQ++Q
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gxp8rOjpRVen4YbIjomHTw
# You can replace this text with custom code or comments, and it will be preserved on regeneration


@@ -105,6 +105,4 @@ __PACKAGE__->add_column(
"+id" => { retrieve_on_insert => 1 }
);
__PACKAGE__->mk_group_accessors('column' => 'has_error');
1;


@@ -386,8 +386,6 @@ __PACKAGE__->add_column(
"+id" => { retrieve_on_insert => 1 }
);
__PACKAGE__->mk_group_accessors('column' => 'has_error');
sub supportsDynamicRunCommand {
my ($self) = @_;


@@ -216,7 +216,7 @@ sub json_hint {
sub _authenticator() {
my $authenticator = Crypt::Passphrase->new(
encoder => { module => 'Argon2', output_size => 16 },
encoder => 'Argon2',
validators => [
(sub {
my ($password, $hash) = @_;


@@ -1,30 +0,0 @@
package Hydra::Schema::ResultSet::EvaluationErrors;
use strict;
use utf8;
use warnings;
use parent 'DBIx::Class::ResultSet';
use Storable qw(dclone);
__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns');
# Exclude expensive error message values unless explicitly requested, and
# replace them with a summary field describing their presence/absence.
sub search_rs {
my ( $class, $query, $attrs ) = @_;
if ($attrs) {
$attrs = dclone($attrs);
}
unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) {
$attrs->{'+columns'}->{'has_error'} = "errormsg != ''";
}
unless (exists $attrs->{'+columns'}->{'errormsg'}) {
push @{ $attrs->{'remove_columns'} }, 'errormsg';
}
return $class->next::method($query, $attrs);
}


@@ -1,30 +0,0 @@
package Hydra::Schema::ResultSet::Jobsets;
use strict;
use utf8;
use warnings;
use parent 'DBIx::Class::ResultSet';
use Storable qw(dclone);
__PACKAGE__->load_components('Helper::ResultSet::RemoveColumns');
# Exclude expensive error message values unless explicitly requested, and
# replace them with a summary field describing their presence/absence.
sub search_rs {
my ( $class, $query, $attrs ) = @_;
if ($attrs) {
$attrs = dclone($attrs);
}
unless (exists $attrs->{'select'} || exists $attrs->{'columns'}) {
$attrs->{'+columns'}->{'has_error'} = "errormsg != ''";
}
unless (exists $attrs->{'+columns'}->{'errormsg'}) {
push @{ $attrs->{'remove_columns'} }, 'errormsg';
}
return $class->next::method($query, $attrs);
}


@@ -8,7 +8,6 @@ use MIME::Base64;
use Nix::Manifest;
use Nix::Store;
use Nix::Utils;
use Hydra::Helper::Nix;
use base qw/Catalyst::View/;
sub process {
@@ -18,7 +17,7 @@ sub process {
$c->response->content_type('text/x-nix-narinfo'); # !!! check MIME type
my ($deriver, $narHash, $time, $narSize, $refs) = $MACHINE_LOCAL_STORE->queryPathInfo($storePath, 1);
my ($deriver, $narHash, $time, $narSize, $refs) = queryPathInfo($storePath, 1);
my $info;
$info .= "StorePath: $storePath\n";
@@ -29,8 +28,8 @@ sub process {
$info .= "References: " . join(" ", map { basename $_ } @{$refs}) . "\n";
if (defined $deriver) {
$info .= "Deriver: " . basename $deriver . "\n";
if ($MACHINE_LOCAL_STORE->isValidPath($deriver)) {
my $drv = $MACHINE_LOCAL_STORE->derivationFromPath($deriver);
if (isValidPath($deriver)) {
my $drv = derivationFromPath($deriver);
$info .= "System: $drv->{platform}\n";
}
}


@@ -16,10 +16,7 @@ sub process {
my $tail = int($c->stash->{tail} // "0");
if ($logPath =~ /\.zst$/) {
my $doTail = $tail ? "| tail -n '$tail'" : "";
open($fh, "-|", "zstd -dc < '$logPath' $doTail") or die;
} elsif ($logPath =~ /\.bz2$/) {
if ($logPath =~ /\.bz2$/) {
my $doTail = $tail ? "| tail -n '$tail'" : "";
open($fh, "-|", "bzip2 -dc < '$logPath' $doTail") or die;
} else {


@@ -6,7 +6,6 @@ use base 'Catalyst::View::TT';
use Template::Plugin::HTML;
use Hydra::Helper::Nix;
use Time::Seconds;
use Digest::SHA qw(sha1_hex);
__PACKAGE__->config(
TEMPLATE_EXTENSION => '.tt',
@@ -26,14 +25,8 @@ __PACKAGE__->config(
makeNameTextForJobset
relativeDuration
stripSSHUser
metricDivId
/]);
sub metricDivId {
my ($self, $c, $text) = @_;
return "metric-" . sha1_hex($text);
}
sub buildLogExists {
my ($self, $c, $build) = @_;
return 1 if defined $c->config->{log_prefix};

src/lib/Makefile.am

@@ -0,0 +1,22 @@
PERL_MODULES = \
$(wildcard *.pm) \
$(wildcard Hydra/*.pm) \
$(wildcard Hydra/Helper/*.pm) \
$(wildcard Hydra/Model/*.pm) \
$(wildcard Hydra/View/*.pm) \
$(wildcard Hydra/Schema/*.pm) \
$(wildcard Hydra/Schema/Result/*.pm) \
$(wildcard Hydra/Schema/ResultSet/*.pm) \
$(wildcard Hydra/Controller/*.pm) \
$(wildcard Hydra/Base/*.pm) \
$(wildcard Hydra/Base/Controller/*.pm) \
$(wildcard Hydra/Script/*.pm) \
$(wildcard Hydra/Component/*.pm) \
$(wildcard Hydra/Event/*.pm) \
$(wildcard Hydra/Plugin/*.pm)
EXTRA_DIST = \
$(PERL_MODULES)
hydradir = $(libexecdir)/hydra/lib
nobase_hydra_DATA = $(PERL_MODULES)


@@ -1,103 +0,0 @@
package Perl::Critic::Policy::Hydra::ProhibitShellInvokingSystemCalls;
use strict;
use warnings;
use constant;
use Perl::Critic::Utils qw{ :severities :classification :ppi };
use base 'Perl::Critic::Policy';
our $VERSION = '1.000';
use constant DESC => q{Shell-invoking system calls are prohibited};
use constant EXPL => q{Use list form system() or IPC::Run3 for better security. String form invokes shell and is vulnerable to injection};
sub supported_parameters { return () }
sub default_severity { return $SEVERITY_HIGHEST }
sub default_themes { return qw( hydra security ) }
sub applies_to { return 'PPI::Token::Word' }
sub violates {
my ( $self, $elem, undef ) = @_;
# Only check system() and exec() calls
return () unless $elem->content() =~ /^(system|exec)$/;
return () unless is_function_call($elem);
# Skip method calls (->system or ->exec)
my $prev = $elem->sprevious_sibling();
return () if $prev && $prev->isa('PPI::Token::Operator') && $prev->content() eq '->';
# Get first argument after function name, skipping whitespace
my $args = $elem->snext_sibling();
return () unless $args;
$args = $args->snext_sibling() while $args && $args->isa('PPI::Token::Whitespace');
# For parenthesized calls, look inside
my $search_elem = $args;
if ($args && $args->isa('PPI::Structure::List')) {
$search_elem = $args->schild(0);
return () unless $search_elem;
}
# Check if it's list form (has comma)
my $current = $search_elem;
if ($current && $current->isa('PPI::Statement')) {
# Look through statement children
for my $child ($current->schildren()) {
return () if $child->isa('PPI::Token::Operator') && $child->content() eq ',';
}
} else {
# Look through siblings for non-parenthesized calls
while ($current) {
return () if $current->isa('PPI::Token::Operator') && $current->content() eq ',';
last if $current->isa('PPI::Token::Structure') && $current->content() eq ';';
$current = $current->snext_sibling();
}
}
# Check if first arg is array variable
my $first = $search_elem->isa('PPI::Statement') ?
$search_elem->schild(0) : $search_elem;
return () if $first && $first->isa('PPI::Token::Symbol') && $first->content() =~ /^[@]/;
# Check if it's a safe single-word command
if ($first && $first->isa('PPI::Token::Quote')) {
my $content = $first->string();
return () if $content =~ /^[a-zA-Z0-9_\-\.\/]+$/;
}
return $self->violation( DESC, EXPL, $elem );
}
1;
__END__
=pod
=head1 NAME
Perl::Critic::Policy::Hydra::ProhibitShellInvokingSystemCalls - Prohibit shell-invoking system() and exec() calls
=head1 DESCRIPTION
This policy prohibits the use of C<system()> and C<exec()> functions when called with a single string argument,
which invokes the shell and is vulnerable to injection attacks.
The list form (e.g., C<system('ls', '-la')>) is allowed as it executes directly without shell interpretation.
For better error handling and output capture, consider using C<IPC::Run3>.
=head1 CONFIGURATION
This Policy is not configurable except for the standard options.
=head1 AUTHOR
Hydra Development Team
=head1 COPYRIGHT
Copyright (c) 2025 Hydra Development Team. All rights reserved.
=cut
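To illustrate the policy being deleted here, a minimal sketch of what it flags and what it lets pass ($logPath and $path are hypothetical variables):

# Flagged: a single interpolated string is handed to the shell
system("bzip2 --force $logPath");

# Allowed: list form executes the program directly, bypassing the shell
system('bzip2', '--force', $logPath);

# Allowed: capture output without a shell via IPC::Run3
use IPC::Run3;
run3(['nix-store', '--add', $path], \undef, \my $stdout, \my $stderr);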


@@ -2,8 +2,7 @@
#include <pqxx/pqxx>
#include <nix/util/environment-variables.hh>
#include <nix/util/util.hh>
#include "util.hh"
struct Connection : pqxx::connection
@@ -27,20 +26,19 @@ struct Connection : pqxx::connection
};
class receiver
class receiver : public pqxx::notification_receiver
{
std::optional<std::string> status;
pqxx::connection & conn;
public:
receiver(pqxx::connection_base & c, const std::string & channel)
: conn(static_cast<pqxx::connection &>(c))
: pqxx::notification_receiver(c, channel) { }
void operator() (const std::string & payload, int pid) override
{
conn.listen(channel, [this](pqxx::notification n) {
status = std::string(n.payload);
});
}
status = payload;
};
std::optional<std::string> get() {
auto s = status;


@@ -2,8 +2,7 @@
#include <map>
#include <nix/util/file-system.hh>
#include <nix/util/util.hh>
#include "util.hh"
struct HydraConfig
{


@@ -1,5 +0,0 @@
libhydra_inc = include_directories('.')
libhydra_dep = declare_dependency(
include_directories: [libhydra_inc],
)


@@ -1,77 +0,0 @@
# Native code
subdir('libhydra')
subdir('hydra-evaluator')
subdir('hydra-queue-runner')
hydra_libexecdir = get_option('libexecdir') / 'hydra'
# Data and interpreted
foreach dir : ['lib', 'root']
install_subdir(dir,
install_dir: hydra_libexecdir,
)
endforeach
subdir('sql')
subdir('ttf')
# Static files for website
hydra_libexecdir_static = hydra_libexecdir / 'root' / 'static'
## Bootstrap
bootstrap_name = 'bootstrap-4.3.1-dist'
bootstrap = custom_target(
'extract-bootstrap',
input: 'root' / (bootstrap_name + '.zip'),
output: bootstrap_name,
command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'],
)
custom_target(
'name-bootstrap',
input: bootstrap,
output: 'bootstrap',
command: ['cp', '-r', '@INPUT@' , '@OUTPUT@'],
install: true,
install_dir: hydra_libexecdir_static,
)
## Flot
custom_target(
'extract-flot',
input: 'root' / 'flot-0.8.3.zip',
output: 'flot',
command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'],
install: true,
install_dir: hydra_libexecdir_static / 'js',
)
## Fontawesome
fontawesome_name = 'fontawesome-free-5.10.2-web'
fontawesome = custom_target(
'extract-fontawesome',
input: 'root' / (fontawesome_name + '.zip'),
output: fontawesome_name,
command: ['unzip', '-u', '-d', '@OUTDIR@', '@INPUT@'],
)
custom_target(
'name-fontawesome',
input: fontawesome,
output: 'fontawesome',
command: ['cp', '-r', '@INPUT@' , '@OUTPUT@'],
install: true,
install_dir: hydra_libexecdir_static,
)
# Scripts
install_subdir('script',
install_dir: get_option('bindir'),
exclude_files: [
'hydra-dev-server',
],
install_mode: 'rwxr-xr-x',
strip_directory: true,
)

src/root/Makefile.am

@@ -0,0 +1,39 @@
TEMPLATES = $(wildcard *.tt)
STATIC = \
$(wildcard static/images/*) \
$(wildcard static/css/*) \
static/js/bootbox.min.js \
static/js/popper.min.js \
static/js/common.js \
static/js/jquery/jquery-3.4.1.min.js \
static/js/jquery/jquery-ui-1.10.4.min.js
FLOT = flot-0.8.3.zip
BOOTSTRAP = bootstrap-4.3.1-dist.zip
FONTAWESOME = fontawesome-free-5.10.2-web.zip
ZIPS = $(FLOT) $(BOOTSTRAP) $(FONTAWESOME)
EXTRA_DIST = $(TEMPLATES) $(STATIC) $(ZIPS)
hydradir = $(libexecdir)/hydra/root
nobase_hydra_DATA = $(EXTRA_DIST)
all:
mkdir -p $(srcdir)/static/js
unzip -u -d $(srcdir)/static $(BOOTSTRAP)
rm -rf $(srcdir)/static/bootstrap
mv $(srcdir)/static/$(basename $(BOOTSTRAP)) $(srcdir)/static/bootstrap
unzip -u -d $(srcdir)/static/js $(FLOT)
unzip -u -d $(srcdir)/static $(FONTAWESOME)
rm -rf $(srcdir)/static/fontawesome
mv $(srcdir)/static/$(basename $(FONTAWESOME)) $(srcdir)/static/fontawesome
install-data-local: $(ZIPS)
mkdir -p $(hydradir)/static/js
cp -prvd $(srcdir)/static/js/* $(hydradir)/static/js
mkdir -p $(hydradir)/static/bootstrap
cp -prvd $(srcdir)/static/bootstrap/* $(hydradir)/static/bootstrap
mkdir -p $(hydradir)/static/fontawesome/{css,webfonts}
cp -prvd $(srcdir)/static/fontawesome/css/* $(hydradir)/static/fontawesome/css
cp -prvd $(srcdir)/static/fontawesome/webfonts/* $(hydradir)/static/fontawesome/webfonts


@@ -11,7 +11,7 @@ titleHTML="Latest builds" _
"") %]
[% PROCESS common.tt %]
<p>Showing builds [% (page - 1) * resultsPerPage + 1 %] - [% (page - 1) * resultsPerPage + builds.size %] out of [% HTML.escape(total) %] in order of descending finish time.</p>
<p>Showing builds [% (page - 1) * resultsPerPage + 1 %] - [% (page - 1) * resultsPerPage + builds.size %] out of [% total %] in order of descending finish time.</p>
[% INCLUDE renderBuildList hideProjectName=project hideJobsetName=jobset hideJobName=job %]
[% INCLUDE renderPager %]


@@ -33,7 +33,7 @@
<div id="hydra-signin" class="modal hide fade" tabindex="-1" role="dialog" aria-hidden="true">
<div class="modal-dialog" role="document">
<div class="modal-content">
<form id="signin-form">
<form>
<div class="modal-body">
<div class="form-group">
<label for="username" class="col-form-label">User name</label>
@@ -45,7 +45,7 @@
</div>
</div>
<div class="modal-footer">
<button type="submit" class="btn btn-primary">Sign in</button>
<button id="do-signin" type="button" class="btn btn-primary">Sign in</button>
<button type="button" class="btn btn-secondary" data-dismiss="modal">Cancel</button>
</div>
</form>
@@ -57,11 +57,10 @@
function finishSignOut() { }
$("#signin-form").submit(function(e) {
e.preventDefault();
$("#do-signin").click(function() {
requestJSON({
url: "[% c.uri_for('/login') %]",
data: $(this).serialize(),
data: $(this).parents("form").serialize(),
type: 'POST',
success: function(data) {
window.location.reload();
@@ -83,7 +82,7 @@
function onGoogleSignIn(googleUser) {
requestJSON({
url: "[% c.uri_for('/google-login') %]",
data: "id_token=" + googleUser.credential,
data: "id_token=" + googleUser.getAuthResponse().id_token,
type: 'POST',
success: function(data) {
window.location.reload();
@@ -92,6 +91,9 @@
return false;
};
$("#google-signin").click(function() {
$(".g-signin2:first-child > div").click();
});
</script>
[% END %]


@@ -37,7 +37,7 @@ END;
seen.${step.drvpath} = 1;
log = c.uri_for('/build' build.id 'nixlog' step.stepnr); %]
<tr>
<td>[% HTML.escape(step.stepnr) %]</td>
<td>[% step.stepnr %]</td>
<td>
[% IF step.type == 0 %]
Build of <tt>[% INCLUDE renderOutputs outputs=step.buildstepoutputs %]</tt>
@@ -61,7 +61,21 @@ END;
<td>[% IF step.busy != 0 || ((step.machine || step.starttime) && (step.status == 0 || step.status == 1 || step.status == 3 || step.status == 4 || step.status == 7)); INCLUDE renderMachineName machine=step.machine; ELSE; "<em>n/a</em>"; END %]</td>
<td class="step-status">
[% IF step.busy != 0 %]
[% INCLUDE renderBusyStatus %]
[% IF step.busy == 1 %]
<strong>Preparing</strong>
[% ELSIF step.busy == 10 %]
<strong>Connecting</strong>
[% ELSIF step.busy == 20 %]
<strong>Sending inputs</strong>
[% ELSIF step.busy == 30 %]
<strong>Building</strong>
[% ELSIF step.busy == 40 %]
<strong>Receiving outputs</strong>
[% ELSIF step.busy == 50 %]
<strong>Post-processing</strong>
[% ELSE %]
<strong>Unknown state</strong>
[% END %]
[% ELSIF step.status == 0 %]
[% IF step.isnondeterministic %]
<span class="warn">Succeeded with non-deterministic result</span>
@@ -86,7 +100,7 @@ END;
[% ELSIF step.status == 11 %]
<span class="error">Output limit exceeded</span>
[% ELSIF step.status == 12 %]
<span class="error">Non-determinism detected</span> [% IF step.timesbuilt %] after [% HTML.escape(step.timesbuilt) %] times[% END %]
<span class="error">Non-determinism detected</span> [% IF step.timesbuilt %] after [% step.timesbuilt %] times[% END %]
[% ELSIF step.errormsg %]
<span class="error">Failed</span>: <em>[% HTML.escape(step.errormsg) %]</em>
[% ELSE %]
@@ -112,16 +126,16 @@ END;
[% IF c.user_exists %]
[% IF available %]
[% IF build.keep %]
<a class="dropdown-item" [% HTML.attributes(href => c.uri_for('/build' build.id 'keep' 0)) %]>Unkeep</a>
<a class="dropdown-item" href="[% c.uri_for('/build' build.id 'keep' 0) %]">Unkeep</a>
[% ELSE %]
<a class="dropdown-item" [% HTML.attributes(href => c.uri_for('/build' build.id 'keep' 1)) %]>Keep</a>
<a class="dropdown-item" href="[% c.uri_for('/build' build.id 'keep' 1) %]">Keep</a>
[% END %]
[% END %]
[% IF build.finished %]
<a class="dropdown-item" [% HTML.attributes(href => c.uri_for('/build' build.id 'restart')) %]>Restart</a>
<a class="dropdown-item" href="[% c.uri_for('/build' build.id 'restart') %]">Restart</a>
[% ELSE %]
<a class="dropdown-item" [% HTML.attributes(href => c.uri_for('/build' build.id 'cancel')) %]>Cancel</a>
<a class="dropdown-item" [% HTML.attributes(href => c.uri_for('/build' build.id 'bump')) %]>Bump up</a>
<a class="dropdown-item" href="[% c.uri_for('/build' build.id 'cancel') %]">Cancel</a>
<a class="dropdown-item" href="[% c.uri_for('/build' build.id 'bump') %]">Bump up</a>
[% END %]
[% END %]
</div>
@@ -132,7 +146,7 @@ END;
<li class="nav-item"><a class="nav-link" href="#tabs-details" data-toggle="tab">Details</a></li>
<li class="nav-item"><a class="nav-link" href="#tabs-buildinputs" data-toggle="tab">Inputs</a></li>
[% IF steps.size() > 0 %]<li class="nav-item"><a class="nav-link" href="#tabs-buildsteps" data-toggle="tab">Build Steps</a></li>[% END %]
[% IF build.dependents %]<li class="nav-item"><a class="nav-link" href="#tabs-usedby" data-toggle="tab">Used By</a></li>[% END %]
[% IF build.dependents %]<li class="nav-item"><a class="nav-link" href="#tabs-usedby" data-toggle="tab">Used By</a></li>[% END%]
[% IF drvAvailable %]<li class="nav-item"><a class="nav-link" href="#tabs-build-deps" data-toggle="tab">Build Dependencies</a></li>[% END %]
[% IF localStore && available %]<li class="nav-item"><a class="nav-link" href="#tabs-runtime-deps" data-toggle="tab">Runtime Dependencies</a></li>[% END %]
[% IF runcommandlogProblem || runcommandlogs.size() > 0 %]<li class="nav-item"><a class="nav-link" href="#tabs-runcommandlogs" data-toggle="tab">RunCommand Logs[% IF runcommandlogProblem %] <span class="badge badge-warning">Disabled</span>[% END %]</a></li>[% END %]
@@ -151,7 +165,7 @@ END;
<table class="info-table">
<tr>
<th>Build ID:</th>
<td>[% HTML.escape(build.id) %]</td>
<td>[% build.id %]</td>
</tr>
<tr>
<th>Status:</th>
@@ -168,9 +182,9 @@ END;
END;
%];
[%+ IF nrFinished == nrConstituents && nrFailedConstituents == 0 %]
all [% HTML.escape(nrConstituents) %] constituent builds succeeded
all [% nrConstituents %] constituent builds succeeded
[% ELSE %]
[% HTML.escape(nrFailedConstituents) %] out of [% HTML.escape(nrConstituents) %] constituent builds failed
[% nrFailedConstituents %] out of [% nrConstituents %] constituent builds failed
[% IF nrFinished < nrConstituents %]
([% nrConstituents - nrFinished %] still pending)
[% END %]
@@ -180,25 +194,25 @@ END;
</tr>
<tr>
<th>System:</th>
<td><tt>[% build.system | html %]</tt></td>
<td><tt>[% build.system %]</tt></td>
</tr>
[% IF build.releasename %]
<tr>
<th>Release name:</th>
<td><tt>[% build.releasename | html %]</tt></td>
<td><tt>[% HTML.escape(build.releasename) %]</tt></td>
</tr>
[% ELSE %]
<tr>
<th>Nix name:</th>
<td><tt>[% build.nixname | html %]</tt></td>
<td><tt>[% build.nixname %]</tt></td>
</tr>
[% END %]
[% IF eval %]
<tr>
<th>Part of:</th>
<td>
<a [% HTML.attributes(href => c.uri_for(c.controller('JobsetEval').action_for('view'), [eval.id])) %]>evaluation [% HTML.escape(eval.id) %]</a>
[% IF nrEvals > 1 +%] (and <a [% HTML.attributes(href => c.uri_for('/build' build.id 'evals')) %]>[% nrEvals - 1 %] others</a>)[% END %]
<a href="[% c.uri_for(c.controller('JobsetEval').action_for('view'), [eval.id]) %]">evaluation [% eval.id %]</a>
[% IF nrEvals > 1 +%] (and <a href="[% c.uri_for('/build' build.id 'evals') %]">[% nrEvals - 1 %] others</a>)[% END %]
</td>
</tr>
[% END %]
@@ -226,9 +240,9 @@ END;
<th>Logfile:</th>
<td>
[% actualLog = cachedBuildStep ? c.uri_for('/build' cachedBuild.id 'nixlog' cachedBuildStep.stepnr) : c.uri_for('/build' build.id 'log') %]
<a class="btn btn-secondary btn-sm" [% HTML.attributes(href => actualLog) %]>pretty</a>
<a class="btn btn-secondary btn-sm" [% HTML.attributes(href => actualLog _ "/raw") %]>raw</a>
<a class="btn btn-secondary btn-sm" [% HTML.attributes(href => actualLog _ "/tail") %]>tail</a>
<a class="btn btn-secondary btn-sm" href="[%actualLog%]">pretty</a>
<a class="btn btn-secondary btn-sm" href="[%actualLog%]/raw">raw</a>
<a class="btn btn-secondary btn-sm" href="[%actualLog%]/tail">tail</a>
</td>
</tr>
[% END %]
@@ -336,12 +350,12 @@ END;
[% IF eval.nixexprinput %]
<tr>
<th>Nix expression:</th>
<td>file <tt>[% eval.nixexprpath | html %]</tt> in input <tt>[% eval.nixexprinput | html %]</tt></td>
<td>file <tt>[% HTML.escape(eval.nixexprpath) %]</tt> in input <tt>[% HTML.escape(eval.nixexprinput) %]</tt></td>
</tr>
[% END %]
<tr>
<th>Nix name:</th>
<td><tt>[% build.nixname | html %]</tt></td>
<td><tt>[% build.nixname %]</tt></td>
</tr>
<tr>
<th>Short description:</th>
@@ -361,11 +375,11 @@ END;
</tr>
<tr>
<th>System:</th>
<td><tt>[% build.system | html %]</tt></td>
<td><tt>[% build.system %]</tt></td>
</tr>
<tr>
<th>Derivation store path:</th>
<td><tt>[% build.drvpath | html %]</tt></td>
<td><tt>[% build.drvpath %]</tt></td>
</tr>
<tr>
<th>Output store paths:</th>
@@ -376,14 +390,14 @@ END;
<tr>
<th>Closure size:</th>
<td>[% mibs(build.closuresize / (1024 * 1024)) %] MiB
(<a [% HTML.attributes(href => chartsURL) %]>history</a>)</td>
(<a href="[%chartsURL%]">history</a>)</td>
</tr>
[% END %]
[% IF build.finished && build.closuresize %]
<tr>
<th>Output size:</th>
<td>[% mibs(build.size / (1024 * 1024)) %] MiB
(<a [% HTML.attributes(href => chartsURL) %]>history</a>)</td>
(<a href="[%chartsURL%]">history</a>)</td>
</tr>
[% END %]
[% IF build.finished && build.buildproducts %]
@@ -412,9 +426,9 @@ END;
<tbody>
[% FOREACH metric IN build.buildmetrics %]
<tr>
<td><tt><a class="row-link" [% HTML.attributes(href => c.uri_for('/job' project.name jobset.name job 'metric' metric.name)) %]">[% metric.name | html %]</a></tt></td>
<td style="text-align: right">[% HTML.escape(metric.value) %]</td>
<td>[% HTML.escape(metric.unit) %]</td>
<td><tt><a class="row-link" [% HTML.attributes(href => c.uri_for('/job' project.name jobset.name job 'metric' metric.name)) %]">[%HTML.escape(metric.name)%]</a></tt></td>
<td style="text-align: right">[%metric.value%]</td>
<td>[%metric.unit%]</td>
</tr>
[% END %]
</tbody>
@@ -456,8 +470,8 @@ END;
[% FOREACH input IN build.dependents %]
<tr>
<td>[% INCLUDE renderFullBuildLink build=input.build %]</td>
<td><tt>[% input.name | html %]</tt></td>
<td><tt>[% input.build.system | html %]</tt></td>
<td><tt>[% input.name %]</tt></td>
<td><tt>[% input.build.system %]</tt></td>
<td>[% INCLUDE renderDateTime timestamp = input.build.timestamp %]</td>
</tr>
[% END %]
@@ -484,7 +498,7 @@ END;
[% ELSIF runcommandlogProblem == "disabled-jobset" %]
This jobset does not enable Dynamic RunCommand support.
[% ELSE %]
Dynamic RunCommand is not enabled: [% HTML.escape(runcommandlogProblem) %].
Dynamic RunCommand is not enabled: [% runcommandlogProblem %].
[% END %]
</div>
[% END %]
@@ -503,18 +517,18 @@ END;
</div>
<div class="d-flex flex-column mr-auto align-self-center">
<div><tt>[% runcommandlog.command | html %]</tt></div>
<div><tt>[% runcommandlog.command | html%]</tt></div>
<div>
[% IF not runcommandlog.is_running() %]
[% IF runcommandlog.did_fail_with_signal() %]
Exit signal: [% runcommandlog.signal | html %]
Exit signal: [% runcommandlog.signal %]
[% IF runcommandlog.core_dumped %]
(Core Dumped)
[% END %]
[% ELSIF runcommandlog.did_fail_with_exec_error() %]
Exec error: [% runcommandlog.error_number | html %]
Exec error: [% runcommandlog.error_number %]
[% ELSIF not runcommandlog.did_succeed() %]
Exit code: [% runcommandlog.exit_code | html %]
Exit code: [% runcommandlog.exit_code %]
[% END %]
[% END %]
</div>
@@ -532,9 +546,9 @@ END;
[% IF runcommandlog.uuid != undef %]
[% runLog = c.uri_for('/build', build.id, 'runcommandlog', runcommandlog.uuid) %]
<div>
<a class="btn btn-secondary btn-sm" [% HTML.attributes(href => runLog) %]>pretty</a>
<a class="btn btn-secondary btn-sm" [% HTML.attributes(href => runLog) %]/raw">raw</a>
<a class="btn btn-secondary btn-sm" [% HTML.attributes(href => runLog) %]/tail">tail</a>
<a class="btn btn-secondary btn-sm" href="[% runLog %]">pretty</a>
<a class="btn btn-secondary btn-sm" href="[% runLog %]/raw">raw</a>
<a class="btn btn-secondary btn-sm" href="[% runLog %]/tail">tail</a>
</div>
[% END %]
</div>
@@ -563,7 +577,7 @@ END;
[% IF eval.flake %]
<p>If you have <a href='https://nixos.org/download/'>Nix
<p>If you have <a href='https://nixos.org/nix/download.html'>Nix
installed</a>, you can reproduce this build on your own machine by
running the following command:</p>
@@ -573,7 +587,7 @@ END;
[% ELSE %]
<p>If you have <a href='https://nixos.org/download/'>Nix
<p>If you have <a href='https://nixos.org/nix/download.html'>Nix
installed</a>, you can reproduce this build on your own machine by
downloading <a [% HTML.attributes(href => url) %]>a script</a>
that checks out all inputs of the build and then invokes Nix to


@@ -7,7 +7,7 @@ href="http://nixos.org/">Nix package manager</a>. If you have Nix
installed, you can subscribe to this channel by once executing</p>
<div class="card bg-light"><div class="card-body"><pre>
<span class="shell-prompt">$ </span>nix-channel --add [% HTML.escape(curUri) +%]
<span class="shell-prompt">$ </span>nix-channel --add [% curUri +%]
<span class="shell-prompt">$ </span>nix-channel --update
</pre></div></div>
@@ -49,9 +49,9 @@ installed, you can subscribe to this channel by once executing</p>
[% b = pkg.build %]
<tr>
<td><a [% HTML.attributes(href => c.uri_for('/build' b.id)) %]>[% HTML.escape(b.id) %]</a></td>
<td><tt>[% b.get_column('releasename') || b.nixname | html %]</tt></td>
<td><tt>[% b.system | html %]</tt></td>
<td><a href="[% c.uri_for('/build' b.id) %]">[% b.id %]</a></td>
<td><tt>[% b.get_column('releasename') || b.nixname %]</tt></td>
<td><tt>[% b.system %]</tt></td>
<td>
[% IF b.homepage %]
<a [% HTML.attributes(href => b.homepage) %]>[% HTML.escape(b.description) %]</a>


@@ -55,17 +55,17 @@ BLOCK renderRelativeDate %]
[% END;
BLOCK renderProjectName %]
<a [% IF inRow %]class="row-link"[% END %] [% HTML.attributes(href => c.uri_for('/project' project)) %]><tt>[% project | html %]</tt></a>
<a [% IF inRow %]class="row-link"[% END %] href="[% c.uri_for('/project' project) %]"><tt>[% project %]</tt></a>
[% END;
BLOCK renderJobsetName %]
<a [% IF inRow %]class="row-link"[% END %] [% HTML.attributes(href => c.uri_for('/jobset' project jobset)) %]><tt>[% jobset | html %]</tt></a>
<a [% IF inRow %]class="row-link"[% END %] href="[% c.uri_for('/jobset' project jobset) %]"><tt>[% jobset %]</tt></a>
[% END;
BLOCK renderJobName %]
<a [% IF inRow %]class="row-link"[% END %] [% HTML.attributes(href => c.uri_for('/job' project jobset job)) %]>[% job | html %]</a>
<a [% IF inRow %]class="row-link"[% END %] href="[% c.uri_for('/job' project jobset job) %]">[% job %]</a>
[% END;
@@ -91,17 +91,6 @@ BLOCK renderDuration;
duration % 60 %]s[%
END;
BLOCK renderDrvInfo;
drvname = step.drvpath
.substr(11) # strip `/nix/store/`
.split('-').slice(1).join("-") # strip hash part
.substr(0, -4); # strip `.drv`
IF drvname != releasename;
IF step.type == 0; action = "Build"; ELSE; action = "Substitution"; END;
IF drvname; %]<em> ([% HTML.escape(action) %] of [% HTML.escape(drvname) %])</em>[% END;
END;
END;
BLOCK renderBuildListHeader %]
<table class="table table-striped table-condensed clickable-rows">
@@ -140,25 +129,20 @@ BLOCK renderBuildListBody;
[% IF showSchedulingInfo %]
<td>[% IF busy %]<span class="badge badge-success">Started</span>[% ELSE %]<span class="badge badge-secondary">Queued</span>[% END %]</td>
[% END %]
<td><a class="row-link" [% HTML.attributes(href => link) %]>[% HTML.escape(build.id) %]</a></td>
<td><a class="row-link" href="[% link %]">[% build.id %]</a></td>
[% IF !hideJobName %]
<td>
<a [% HTML.attributes(href => link) %]>[% IF !hideJobsetName %][% HTML.escape(build.jobset.get_column("project")) %]:[% HTML.escape(build.jobset.get_column("name")) %]:[% END %][% HTML.escape(build.get_column("job")) %]</a>
[% IF showStepName %]
[% INCLUDE renderDrvInfo step=build.buildsteps releasename=build.nixname %]
[% END %]
</td>
<td><a href="[%link%]">[% IF !hideJobsetName %][%build.jobset.get_column("project")%]:[%build.jobset.get_column("name")%]:[% END %][%build.get_column("job")%]</td>
[% END %]
<td class="nowrap">[% t = showSchedulingInfo ? build.timestamp : build.stoptime; IF t; INCLUDE renderRelativeDate timestamp=(showSchedulingInfo ? build.timestamp : build.stoptime); ELSE; "-"; END %]</td>
<td>[% !showSchedulingInfo and build.get_column('releasename') ? HTML.escape(build.get_column('releasename')) : HTML.escape(build.nixname) %]</td>
<td class="nowrap"><tt>[% build.system | html %]</tt></td>
<td>[% !showSchedulingInfo and build.get_column('releasename') ? build.get_column('releasename') : build.nixname %]</td>
<td class="nowrap"><tt>[% build.system %]</tt></td>
[% IF showDescription %]
<td>[% HTML.escape(build.description) %]</td>
<td>[% build.description %]</td>
[% END %]
</tr>
[% END;
IF linkToAll %]
<tr><td class="centered" colspan="5"><a [% HTML.attributes(href => linkToAll) %]><em>More...</em></a></td></tr>
<tr><td class="centered" colspan="5"><a href="[% linkToAll %]"><em>More...</em></a></td></tr>
[% END;
END;
@@ -176,11 +160,11 @@ BLOCK renderBuildList;
END;
BLOCK renderLink %]<a [% HTML.attributes(href => uri) %]>[% HTML.escape(title) %]</a>[% END;
BLOCK renderLink %]<a href="[% uri %]">[% title %]</a>[% END;
BLOCK maybeLink;
IF uri %]<a [% HTML.attributes(href => uri, class => class); IF confirmmsg +%] onclick="javascript:return confirm('[% confirmmsg %]')"[% END %]>[% HTML.escape(content) %]</a>[% ELSE; HTML.escape(content); END;
IF uri %]<a [% HTML.attributes(href => uri, class => class); IF confirmmsg +%] onclick="javascript:return confirm('[% confirmmsg %]')"[% END %]>[% content %]</a>[% ELSE; content; END;
END;
@@ -192,7 +176,7 @@ BLOCK renderSelection;
<label class="radio inline">
<input type="radio" [% HTML.attributes(id => param, name => param, value => name) %]
[% IF name == curValue; "checked='1'"; END %]>
[% HTML.escape(options.$name) %]
[% options.$name %]
</input>
</label>
[% END %]
@@ -200,7 +184,7 @@ BLOCK renderSelection;
[% ELSE %]
<select class="custom-select" [% HTML.attributes(id => param, name => param) %]>
[% FOREACH name IN options.keys.sort %]
<option [% IF name == curValue; "selected='selected'"; END; " "; HTML.attributes(value => name) %]>[% HTML.escape(options.$name) %]</option>
<option [% IF name == curValue; "selected='selected'"; END; " "; HTML.attributes(value => name) %]>[% options.$name %]</option>
[% END %]
</select>
[% END;
@@ -216,12 +200,12 @@ BLOCK editString; %]
BLOCK renderFullBuildLink;
INCLUDE renderFullJobNameOfBuild build=build %] <a [% HTML.attributes(href => c.uri_for('/build' build.id)) %]>build [% HTML.escape(build.id) %]</a>[%
INCLUDE renderFullJobNameOfBuild build=build %] <a href="[% c.uri_for('/build' build.id) %]">build [% build.id %]</a>[%
END;
BLOCK renderBuildIdLink; %]
<a [% HTML.attributes(href => c.uri_for('/build' id)) %]>build [% HTML.escape(id) %]</a>
<a href="[% c.uri_for('/build' id) %]">build [% id %]</a>
[% END;
@@ -261,27 +245,6 @@ BLOCK renderBuildStatusIcon;
END;
BLOCK renderBusyStatus;
IF step.busy == 1 %]
<strong>Preparing</strong>
[% ELSIF step.busy == 10 %]
<strong>Connecting</strong>
[% ELSIF step.busy == 20 %]
<strong>Sending inputs</strong>
[% ELSIF step.busy == 30 %]
<strong>Building</strong>
[% ELSIF step.busy == 35 %]
<strong>Waiting to receive outputs</strong>
[% ELSIF step.busy == 40 %]
<strong>Receiving outputs</strong>
[% ELSIF step.busy == 50 %]
<strong>Post-processing</strong>
[% ELSE %]
<strong>Unknown state</strong>
[% END;
END;
BLOCK renderStatus;
IF build.finished;
buildstatus = build.buildstatus;
@@ -320,7 +283,7 @@ END;
BLOCK renderShortInputValue;
IF input.type == "build" || input.type == "sysbuild" %]
<a [% HTML.attributes(href => c.uri_for('/build' input.dependency.id)) %]>[% HTML.escape(input.dependency.id) %]</a>
<a href="[% c.uri_for('/build' input.dependency.id) %]">[% input.dependency.id %]</a>
[% ELSIF input.type == "string" %]
<tt>"[% HTML.escape(input.value) %]"</tt>
[% ELSIF input.type == "nix" || input.type == "boolean" %]
@@ -338,7 +301,7 @@ BLOCK renderDiffUri;
url = bi1.uri;
path = url.replace(base, '');
IF url.match(base) %]
<a target="_blank" [% HTML.attributes(href => m.uri.replace('_path_', path).replace('_1_', bi1.revision).replace('_2_', bi2.revision)) %]>[% HTML.escape(contents) %]</a>
<a target="_blank" href="[% m.uri.replace('_path_', path).replace('_1_', bi1.revision).replace('_2_', bi2.revision) %]">[% contents %]</a>
[% nouri = 0;
END;
END;
@@ -347,13 +310,13 @@ BLOCK renderDiffUri;
url = res.0;
branch = res.1;
IF bi1.type == "hg" || bi1.type == "git" %]
<a target="_blank" [% HTML.attributes(href => c.uri_for('/api/scmdiff', {
<a target="_blank" href="[% HTML.escape(c.uri_for('/api/scmdiff', {
uri = url,
rev1 = bi1.revision,
rev2 = bi2.revision,
type = bi1.type,
branch = branch
})) %]>[% HTML.escape(contents) %]</a>
})) %]">[% contents %]</a>
[% ELSE;
contents;
END;
@@ -369,8 +332,8 @@ BLOCK renderInputs; %]
<tbody>
[% FOREACH input IN inputs %]
<tr>
<td><tt>[% input.name | html %]</tt></td>
<td>[% type = input.type; HTML.escape(inputTypes.$type) %]</td>
<td><tt>[% input.name %]</tt></td>
<td>[% type = input.type; inputTypes.$type %]</td>
<td>
[% IF input.type == "build" || input.type == "sysbuild" %]
[% INCLUDE renderFullBuildLink build=input.dependency %]
@@ -383,7 +346,7 @@ BLOCK renderInputs; %]
[% END %]
</td>
<td>[% IF input.revision %][% HTML.escape(input.revision) %][% END %]</td>
<td><tt>[% input.path | html %]</tt></td>
<td><tt>[% input.path %]</tt></td>
</tr>
[% END %]
</tbody>
@@ -407,33 +370,33 @@ BLOCK renderInputDiff; %]
IF bi1.name == bi2.name;
IF bi1.type == bi2.type;
IF bi1.value != bi2.value || bi1.uri != bi2.uri %]
<tr><td><b>[% HTML.escape(bi1.name) %]</b></td><td><tt>[% INCLUDE renderShortInputValue input=bi1 %]</tt> to <tt>[% INCLUDE renderShortInputValue input=bi2 %]</tt></td></tr>
<tr><td><b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderShortInputValue input=bi1 %]</tt> to <tt>[% INCLUDE renderShortInputValue input=bi2 %]</tt></td></tr>
[% ELSIF bi1.uri == bi2.uri && bi1.revision != bi2.revision %]
[% IF bi1.type == "git" %]
<tr><td>
<b>[% HTML.escape(bi1.name) %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 12) _ ' to ' _ bi2.revision.substr(0, 12)) %]</tt>
<b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 6) _ ' to ' _ bi2.revision.substr(0, 6)) %]</tt>
</td></tr>
[% ELSE %]
<tr><td>
<b>[% HTML.escape(bi1.name) %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision _ ' to ' _ bi2.revision) %]</tt>
<b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision _ ' to ' _ bi2.revision) %]</tt>
</td></tr>
[% END %]
[% ELSIF bi1.dependency.id != bi2.dependency.id || bi1.path != bi2.path %]
<tr><td>
<b>[% HTML.escape(bi1.name) %]</b></td><td><tt>[% INCLUDE renderShortInputValue input=bi1 %]</tt> to <tt>[% INCLUDE renderShortInputValue input=bi2 %]</tt>
<b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderShortInputValue input=bi1 %]</tt> to <tt>[% INCLUDE renderShortInputValue input=bi2 %]</tt>
<br/>
<br/>
[% INCLUDE renderInputDiff inputs1=bi1.dependency.inputs inputs2=bi2.dependency.inputs nestedDiff=1 nestLevel=nestLevel+1 %]
</td></tr>
[% END %]
[% ELSE %]
<tr><td><b>[% HTML.escape(bi1.name) %]</b></td><td>Changed input type from '[% type = bi1.type; HTML.escape(inputTypes.$type) %]' to '[% type = bi2.type; HTML.escape(inputTypes.$type) %]'</td></tr>
<tr><td><b>[% bi1.name %]</b></td><td>Changed input type from '[% type = bi1.type; inputTypes.$type %]' to '[% type = bi2.type; inputTypes.$type %]'</td></tr>
[% END;
deletedInput = 0;
END;
END;
IF deletedInput == 1 %]
<tr><td><b>[% HTML.escape(bi1.name) %]</b></td><td>Input not present in this build.</td></tr>
<tr><td><b>[% bi1.name %]</b></td><td>Input not present in this build.</td></tr>
[% END;
END;
END %]
@@ -443,10 +406,10 @@ BLOCK renderInputDiff; %]
BLOCK renderPager %]
<ul class="pagination">
<li class="page-item[% IF page == 1 %] disabled[% END %]"><a class="page-link" [% HTML.attributes(href => "$baseUri?page=1") %]>&laquo; First</a></li>
<li class="page-item[% IF page == 1 %] disabled[% END %]"><a class="page-link" [% HTML.attributes(href => "$baseUri?page=" _ (page - 1)) %]>&lsaquo; Previous</a></li>
<li class="page-item[% IF page * resultsPerPage >= total %] disabled[% END %]"><a class="page-link" [% HTML.attributes(href => "$baseUri?page=" _ (page + 1)) %]>Next &rsaquo;</a></li>
<li class="page-item[% IF page * resultsPerPage >= total %] disabled[% END %]"><a class="page-link" [% HTML.attributes(href => "$baseUri?page=" _ ((total - 1) div resultsPerPage + 1)) %]>Last &raquo;</a></li>
<li class="page-item[% IF page == 1 %] disabled[% END %]"><a class="page-link" href="[% "$baseUri?page=1" %]">&laquo; First</a></li>
<li class="page-item[% IF page == 1 %] disabled[% END %]"><a class="page-link" href="[% "$baseUri?page="; (page - 1) %]">&lsaquo; Previous</a></li>
<li class="page-item[% IF page * resultsPerPage >= total %] disabled[% END %]"><a class="page-link" href="[% "$baseUri?page="; (page + 1) %]">Next &rsaquo;</a></li>
<li class="page-item[% IF page * resultsPerPage >= total %] disabled[% END %]"><a class="page-link" href="[% "$baseUri?page="; (total - 1) div resultsPerPage + 1 %]">Last &raquo;</a></li>
</ul>
[% END;
@@ -455,13 +418,13 @@ BLOCK renderShortEvalInput;
IF input.type == "svn" || input.type == "svn-checkout" || input.type == "bzr" || input.type == "bzr-checkout" %]
r[% input.revision %]
[% ELSIF input.type == "git" %]
<tt>[% input.revision.substr(0, 7) | html %]</tt>
<tt>[% input.revision.substr(0, 7) %]</tt>
[% ELSIF input.type == "hg" %]
<tt>[% input.revision.substr(0, 12) | html %]</tt>
<tt>[% input.revision.substr(0, 12) %]</tt>
[% ELSIF input.type == "build" || input.type == "sysbuild" %]
<a [% HTML.attributes(href => c.uri_for('/build' input.get_column('dependency'))) %]>[% HTML.escape(input.get_column('dependency')) %]</a>
<a href="[% c.uri_for('/build' input.get_column('dependency')) %]">[% input.get_column('dependency') %]</a>
[% ELSE %]
<tt>[% input.revision | html %]</tt>
<tt>[% input.revision %]</tt>
[% END;
END;
@@ -498,7 +461,7 @@ BLOCK renderEvals %]
eval = e.eval;
link = c.uri_for(c.controller('JobsetEval').action_for('view'), [eval.id]) %]
<tr>
<td><a class="row-link" [% HTML.attributes(href => link) %]>[% HTML.escape(eval.id) %]</a></td>
<td><a class="row-link" href="[% link %]">[% eval.id %]</a></td>
[% IF !jobset && !build %]
<td>[% INCLUDE renderFullJobsetName project=eval.jobset.project.name jobset=eval.jobset.name %]</td>
[% END %]
@@ -507,40 +470,40 @@ BLOCK renderEvals %]
[% IF e.changedInputs.size > 0;
sep='';
FOREACH input IN e.changedInputs;
sep; %] [% HTML.escape(input.name) %] → [% INCLUDE renderShortEvalInput input=input;
sep; %] [% input.name %] → [% INCLUDE renderShortEvalInput input=input;
sep=', ';
END;
ELSE %]
-
[% END %]
[% IF eval.evaluationerror.has_error %]
[% IF eval.evaluationerror.errormsg %]
<span class="badge badge-warning">Eval Errors</span>
[% END %]
</td>
<td align='right' class="nowrap">
<span class="badge badge-success">[% HTML.escape(e.nrSucceeded) %]</span>
<span class="badge badge-success">[% e.nrSucceeded %]</span>
</td>
<td align="right" class="nowrap">
[% IF e.nrFailed > 0 %]
<span class="badge badge-danger">[% HTML.escape(e.nrFailed) %]</span>
<span class="badge badge-danger">[% e.nrFailed %]</span>
[% END %]
</td>
<td align="right" class="nowrap">
[% IF e.nrScheduled > 0 %]
<span class="badge badge-secondary">[% HTML.escape(e.nrScheduled) %]</span>
<span class="badge badge-secondary">[% e.nrScheduled %]</span>
[% END %]
</td>
<td align='right' class="nowrap">
[% IF e.diff > 0 %]
<span class='badge badge-success'><strong>+[% HTML.escape(e.diff) %]</strong></span>
<span class='badge badge-success'><strong>+[% e.diff %]</strong></span>
[% ELSIF e.diff < 0 && e.nrScheduled == 0 %]
<span class='badge badge-danger'><strong>[% HTML.escape(e.diff) %]</strong></span>
<span class='badge badge-danger'><strong>[% e.diff %]</strong></span>
[% END %]
</td>
</tr>
[% END;
IF linkToAll %]
<tr><td class="centered" colspan="7"><a [% HTML.attributes(href => linkToAll) %]><em>More...</em></a></td></tr>
<tr><td class="centered" colspan="7"><a href="[% linkToAll %]"><em>More...</em></a></td></tr>
[% END %]
</tbody>
</table>
@@ -548,19 +511,19 @@ BLOCK renderEvals %]
BLOCK renderLogLinks %]
(<a [% IF inRow %]class="row-link"[% END %] [% HTML.attributes(href => url) %]>log</a>, <a [% HTML.attributes(href => "$url/raw") %]>raw</a>, <a [% HTML.attributes(href => "$url/tail") %]>tail</a>)
(<a [% IF inRow %]class="row-link"[% END %] href="[% url %]">log</a>, <a href="[% "$url/raw" %]">raw</a>, <a href="[% "$url/tail" %]">tail</a>)
[% END;
BLOCK makeLazyTab %]
<div [% HTML.attributes(id => tabName) %] class="tab-pane">
<div id="[% tabName %]" class="tab-pane">
<center><span class="spinner-border spinner-border-sm"/></center>
</div>
<script>
[% IF callback.defined %]
$(function() { makeLazyTab("[% HTML.escape(tabName) %]", "[% uri %]", [% callback %] ); });
$(function() { makeLazyTab("[% tabName %]", "[% uri %]", [% callback %] ); });
[% ELSE %]
$(function() { makeLazyTab("[% HTML.escape(tabName) %]", "[% uri %]", null ); });
$(function() { makeLazyTab("[% tabName %]", "[% uri %]", null ); });
[% END %]
</script>
[% END;
@@ -587,7 +550,7 @@ BLOCK navItem %]
<li class="nav-item">
<a class="nav-link[% IF "${root}${curUri}" == uri %] active[% END %]"
[% HTML.attributes(href => uri) %]>
[% HTML.escape(title) %]
[% title %]
</a>
</li>
[% END;
@@ -639,7 +602,7 @@ BLOCK renderJobsetOverview %]
<td>[% HTML.escape(j.description) %]</td>
<td>[% IF j.lastcheckedtime;
INCLUDE renderDateTime timestamp = j.lastcheckedtime;
IF j.has_error || j.fetcherrormsg; %]&nbsp;<span class = 'badge badge-warning'>Error</span>[% END;
IF j.errormsg || j.fetcherrormsg; %]&nbsp;<span class = 'badge badge-warning'>Error</span>[% END;
ELSE; "-";
END %]</td>
[% IF j.get_column('nrtotal') > 0 %]
@@ -657,17 +620,17 @@ BLOCK renderJobsetOverview %]
<td><span class="[% class %]">[% successrate FILTER format('%d') %]%</span></td>
<td>
[% IF j.get_column('nrsucceeded') > 0 %]
<span class="badge badge-success">[% HTML.escape(j.get_column('nrsucceeded')) %]</span>
<span class="badge badge-success">[% j.get_column('nrsucceeded') %]</span>
[% END %]
</td>
<td>
[% IF j.get_column('nrfailed') > 0 %]
<span class="badge badge-danger">[% HTML.escape(j.get_column('nrfailed')) %]</span>
<span class="badge badge-danger">[% j.get_column('nrfailed') %]</span>
[% END %]
</td>
<td>
[% IF j.get_column('nrscheduled') > 0 %]
<span class="badge badge-secondary">[% HTML.escape(j.get_column('nrscheduled')) %]</span>
<span class="badge badge-secondary">[% j.get_column('nrscheduled') %]</span>
[% END %]
</td>
</tr>
@@ -685,22 +648,14 @@ BLOCK includeFlot %]
[% END;
BLOCK renderYesNo %]
[% IF value %]
<span class="text-success">Yes</span>
[% ELSE %]
<span class="text-danger">No</span>
[% END %]
[% END;
BLOCK createChart %]
<div id="[% id %]-chart" style="width: 1000px; height: 400px;"></div>
<div id="[% id %]-overview" style="margin-top: 20px; margin-left: 50px; margin-right: 50px; width: 900px; height: 100px"></div>
<div id="[%id%]-chart" style="width: 1000px; height: 400px;"></div>
<div id="[%id%]-overview" style="margin-top: 20px; margin-left: 50px; margin-right: 50px; width: 900px; height: 100px"></div>
<script type="text/javascript">
$(function() {
showChart("[% HTML.escape(id) %]", "[% dataUrl %]", "[% yaxis %]");
showChart("[%id%]", "[%dataUrl%]", "[%yaxis%]");
});
</script>

@@ -9,7 +9,7 @@
[% ELSE %]
<p>Below are the most recent builds of the [% HTML.escape(builds.size) %] jobs of which you
<p>Below are the most recent builds of the [% builds.size %] jobs of which you
(<tt>[% HTML.escape(user.emailaddress) %]</tt>) are a maintainer.</p>
[% INCLUDE renderBuildList %]
