Compare commits: pure-eval...merge-tabl (16 Commits)
| Author | SHA1 | Date |
|---|---|---|
| | f92a143c5f | |
| | 3eeb035d5a | |
| | 2a471ce0a4 | |
| | a4b63db992 | |
| | 0420232324 | |
| | 4dfca7f83b | |
| | 36bd37d353 | |
| | 84834881c3 | |
| | 2560e97c1e | |
| | 33ba3cf330 | |
| | 5771fb33e0 | |
| | a07a2e6687 | |
| | 8781c35de7 | |
| | b061a8cea9 | |
| | dd2e91e7cc | |
| | c454ecb21c | |
24 .editorconfig
@@ -1,24 +0,0 @@
# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true

[*.{cc,hh,hpp,pl,pm,sh,t}]
indent_style = space
indent_size = 4

[Makefile]
indent_style = tab

[*.nix]
indent_style = space
indent_size = 2

# Match diffs, avoid trimming trailing whitespace
[*.{diff,patch}]
trim_trailing_whitespace = false
37 .github/ISSUE_TEMPLATE/bug_report.md vendored
@@ -1,37 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Hydra Server:**

Please fill out this data as well as you can, but don't worry if you can't -- just do your best.

- OS and version: [e.g. NixOS 22.05.20211203.ee3794c]
- Version of Hydra
- Version of Nix Hydra is built against
- Version of the Nix daemon

**Additional context**
Add any other context about the problem here.
20 .github/ISSUE_TEMPLATE/feature_request.md vendored
@@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
6 .github/dependabot.yml vendored
@@ -1,6 +0,0 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
27 .github/workflows/test.yml vendored
@@ -1,27 +0,0 @@
name: "Test"
on:
  pull_request:
  merge_group:
  push:
    branches:
      - master
jobs:
  tests:
    strategy:
      matrix:
        include:
          - system: x86_64-linux
            runner: ubuntu-latest
          - system: aarch64-linux
            runner: ubuntu-24.04-arm
    runs-on: ${{ matrix.runner }}
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - uses: cachix/install-nix-action@v31
        with:
          extra_nix_config: |
            extra-systems = ${{ matrix.system }}
      - uses: DeterminateSystems/magic-nix-cache-action@main
      - run: nix-build -A checks.${{ matrix.system }}.build -A checks.${{ matrix.system }}.validate-openapi
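The check the workflow runs can be reproduced locally for one system (a sketch, using the same attribute names as the `run:` step above):

```console
$ nix-build -A checks.x86_64-linux.build -A checks.x86_64-linux.validate-openapi
```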
28 .github/workflows/update-flakes.yml vendored
@@ -1,28 +0,0 @@
name: "Update Flakes"
on:
  schedule:
    # Run weekly on Monday at 00:00 UTC
    - cron: '0 0 * * 1'
  workflow_dispatch:
jobs:
  update-flakes:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    steps:
      - uses: actions/checkout@v3
      - uses: cachix/install-nix-action@v31
      - name: Update flake inputs
        run: nix flake update
      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v5
        with:
          commit-message: "flake.lock: Update"
          title: "Update flake inputs"
          body: |
            Automated flake input updates.

            This PR was automatically created by the update-flakes workflow.
          branch: update-flakes
          delete-branch: true
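What the scheduled job does can be previewed locally before any PR is created (a sketch):

```console
$ nix flake update
$ git diff flake.lock
```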
28 .gitignore vendored
@@ -1,13 +1,17 @@
*.o
*~
/.direnv/
.test_info.*
/src/root/static/bootstrap
/src/root/static/fontawesome
/src/root/static/js/flot
/src/sql/hydra-postgresql.sql
/src/sql/hydra-sqlite.sql
/src/sql/tmp.sqlite
.hydra-data
result
result-*
outputs
Makefile
Makefile.in
.deps
/config.guess
/config.log
/config.status
/config.sub
/configure
/depcomp
/libtool
/ltmain.sh
/autom4te.cache
/aclocal.m4
/missing
/install-sh
10 .perlcriticrc
@@ -1,10 +0,0 @@
theme = community

# 5 is the least complainy, 1 is the most complainy
severity = 1

# Disallow backticks - use IPC::Run3 instead for better security
include = InputOutput::ProhibitBacktickOperators

# Prohibit shell-invoking system() and exec() - use list form or IPC::Run3 instead
include = Hydra::ProhibitShellInvokingSystemCalls
4 Makefile.am Normal file
@@ -0,0 +1,4 @@
SUBDIRS = doc src tests
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)
6 Procfile
@@ -1,6 +0,0 @@
hydra-evaluator: ./foreman/start-evaluator.sh
hydra-queue-runner: ./foreman/start-queue-runner.sh
hydra-notify: ./foreman/start-notify.sh
hydra-server: ./foreman/start-hydra.sh
manual: ./foreman/start-manual.sh
postgres: ./foreman/start-postgres.sh
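All of these processes are started together by [foreman](https://github.com/ddollar/foreman), which the README below shows in context:

```console
$ foreman start
```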
156 README.md
@@ -1,156 +0,0 @@
# Hydra

[](https://github.com/NixOS/hydra/actions)

Hydra is a [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) service for [Nix](https://nixos.org/nix) based projects.

## Installation And Setup

**Note**: The instructions provided below are intended to enable new users to get a simple, local installation up and running. They are by no means sufficient for running a production server, let alone a public instance.

### Enabling The Service

Running Hydra is currently only supported on NixOS. The [hydra module](https://github.com/NixOS/nixpkgs/blob/release-20.03/nixos/modules/services/continuous-integration/hydra/default.nix) allows for an easy setup. The following configuration can be used for a simple setup that performs all builds on _localhost_ (please refer to the [Options page](https://nixos.org/nixos/options.html#services.hydra) for all available options):

```nix
{
  services.hydra = {
    enable = true;
    hydraURL = "http://localhost:3000";
    notificationSender = "hydra@localhost";
    buildMachinesFiles = [];
    useSubstitutes = true;
  };
}
```

### Creating An Admin User

Once the Hydra service has been configured as above and activated, you should already be able to access the web interface at the specified URL. However, some actions require an admin user, which has to be created first:

```
$ su - hydra
$ hydra-create-user <USER> --full-name '<NAME>' \
    --email-address '<EMAIL>' --password-prompt --role admin
```

Afterwards you should be able to log in by clicking "_Sign In_" at the top right of the web interface, using the credentials specified by `hydra-create-user`. Once you are logged in you can click "_Admin -> Create Project_" to configure your first project.

### Creating A Simple Project And Jobset

In order to evaluate and build anything you need to create _projects_ that contain _jobsets_. Hydra supports imperative and declarative projects and many different configurations. The steps below will guide you through creating a minimal imperative project configuration.

#### Creating A Project

Log in as administrator, click "_Admin_" and select "_Create project_". Fill in the form as follows:

- **Identifier**: `hello-project`
- **Display name**: `hello`
- **Description**: `hello project`

Click "_Create project_".

#### Creating A Jobset

After creating a project you are forwarded to the project page. Click "_Actions_" and choose "_Create jobset_". Change **Type** to Legacy for the example below. Fill in the form with the following values:

- **Identifier**: `hello-project`
- **Nix expression**: `examples/hello.nix` in `hydra`
- **Check interval**: 60
- **Scheduling shares**: 1

We have to add two inputs for this jobset, one for _nixpkgs_ and one for _hydra_ (which we are referencing in the Nix expression above):

- **Input name**: `nixpkgs`
- **Type**: `Git checkout`
- **Value**: `https://github.com/NixOS/nixpkgs nixos-24.05`

- **Input name**: `hydra`
- **Type**: `Git checkout`
- **Value**: `https://github.com/nixos/hydra`

Make sure **State** at the top of the page is set to "_Enabled_" and click on "_Create jobset_". This concludes the creation of a jobset that evaluates [./examples/hello.nix](./examples/hello.nix) once a minute. Clicking "_Evaluations_" should list the first evaluation of the newly created jobset after a brief delay.

## Building And Developing

### Building Hydra

You can build Hydra via `nix build` using the provided [default.nix](./default.nix):

```
$ nix build
```

### Development Environment

You can use the provided shell.nix to get a working development environment:

```
$ nix develop
$ ln -svf ../../../build/src/bootstrap src/root/static/bootstrap
$ ln -svf ../../../build/src/fontawesome src/root/static/fontawesome
$ ln -svf ../../../../build/src/flot src/root/static/js/flot
$ meson setup build
$ ninja -C build
```
The development environment can also be established automatically using [nix-direnv](https://github.com/nix-community/nix-direnv).
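A sketch of the usual nix-direnv setup (this is nix-direnv's standard recipe, not something this repository ships):

```console
$ echo 'use flake' > .envrc
$ direnv allow
```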

### Executing Hydra During Development

When working on new features or bug fixes you need to be able to run Hydra from your working copy. This can be done using [foreman](https://github.com/ddollar/foreman):

```
$ nix develop
$ # hack hack
$ ninja -C build
$ foreman start
```

Have a look at the [Procfile](./Procfile) if you want to see how the processes are being started. In order to avoid conflicts with services that might be running on your host, Hydra and PostgreSQL are started on custom ports:

- hydra-server: 63333, with the username "alice" and the password "foobar"
- postgresql: 64444, can be connected to using `psql -p 64444 -h localhost hydra`

Note that this is only ever meant as an ad-hoc way of executing Hydra during development. Please make use of the NixOS module for actually running Hydra in production.

### Checking your patches

After making your changes, verify that the test suite passes and perlcritic is still happy.

Start by following the steps in [Development Environment](#development-environment).

Then, you can run the tests and the perlcritic linter together with:

```console
$ nix develop
$ ninja -C build test
```

You can run a single test with:

```
$ nix develop
$ cd build
$ meson test --test-args=../t/Hydra/Event.t testsuite
```

And you can run just perlcritic with:

```
$ nix develop
$ cd build
$ meson test perlcritic
```

### JSON API
You can also interface with Hydra through a JSON API. The API is defined in [hydra-api.yaml](./hydra-api.yaml) and you can test and explore it via the [Swagger editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/NixOS/hydra/master/hydra-api.yaml).
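For example, many pages can be fetched as JSON simply by setting the Accept header (a sketch; the build ID here is hypothetical, and the full set of endpoints is defined in hydra-api.yaml):

```console
$ curl -H 'Accept: application/json' https://hydra.nixos.org/build/1234567
```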

## Additional Resources

- [Hydra User's Guide](https://nixos.org/hydra/manual/)
- [Hydra on the NixOS Wiki](https://wiki.nixos.org/wiki/Hydra)
- [hydra-cli](https://github.com/nlewo/hydra-cli)
- [Peter Simons - Hydra: Setting up your own build farm (NixOS)](https://www.youtube.com/watch?v=RXV0Y5Bn-QQ)

## License

Hydra is licensed under [GPL-3.0](./COPYING).

Icons provided free by [EmojiOne](http://emojione.com).
97 configure.ac Normal file
@@ -0,0 +1,97 @@
AC_INIT([Hydra], [m4_esyscmd([echo -n $(cat ./version)$VERSION_SUFFIX])],
  [nix-dev@cs.uu.nl], [hydra], [http://nixos.org/hydra/])
AM_INIT_AUTOMAKE([foreign])

AC_LANG([C++])

AC_PROG_CC
AC_PROG_INSTALL
AC_PROG_LN_S
AC_PROG_LIBTOOL
AC_PROG_CXX

dnl Optional dependencies to build the manual, normally not needed
dnl since the tarball comes with the PDF and HTML manuals.
AC_PATH_PROG([DBLATEX], [dblatex])
AC_PATH_PROG([XSLTPROC], [xsltproc])

AC_ARG_WITH([docbook-xsl],
  [AS_HELP_STRING([--with-docbook-xsl=PATH],
    [path of the DocBook XSL stylesheets])],
  [docbookxsl="$withval"],
  [docbookxsl="/docbook-xsl-missing"])
AC_SUBST([docbookxsl])


AC_DEFUN([NEED_PROG],
[
AC_PATH_PROG($1, $2)
if test -z "$$1"; then
  AC_MSG_ERROR([$2 is required])
fi
])

NEED_PROG(perl, perl)

AC_ARG_WITH(nix, AC_HELP_STRING([--with-nix=PATH],
  [prefix of nix]),
  nix=$withval, nix=/nix-missing)
AC_SUBST(nix)

PATH="$nix/bin:$PATH"
export PATH

NEED_PROG([NIX_STORE_PROGRAM], [nix-store])

AC_MSG_CHECKING([whether $NIX_STORE_PROGRAM is recent enough])
if test -n "$NIX_STORE" -a -n "$TMPDIR"
then
    # This may be executed from within a build chroot, so pacify
    # `nix-store' instead of letting it choke while trying to mkdir
    # /nix/var.
    NIX_STATE_DIR="$TMPDIR"
    export NIX_STATE_DIR
fi
if "$NIX_STORE_PROGRAM" --timeout 123 -q > /dev/null 2>&1
then
    AC_MSG_RESULT([yes])
else
    AC_MSG_RESULT([no])
    AC_MSG_ERROR([`$NIX_STORE_PROGRAM' doesn't support `--timeout'; please use a newer version.])
fi

old_CPPFLAGS="$CPPFLAGS"
old_LIBS="$LIBS"

CPPFLAGS="$CPPFLAGS -I$nix/include/nix"
LDFLAGS="$LDFLAGS -L$nix/lib/nix"

AC_CHECK_HEADER([store-api.hh], [:],
  [AC_MSG_ERROR([Nix headers not found; please install Nix or check the `--with-nix' option.])])
AC_CHECK_LIB([expr], [_ZN3nix9EvalState17parseExprFromFileESs], [:],
  [AC_MSG_ERROR([Nix library not found; please install Nix or check the `--with-nix' option.])])

CPPFLAGS="$old_CPPFLAGS"
LIBS="$old_LIBS"

PKG_CHECK_MODULES([BDW_GC], [bdw-gc])

testPath="$(dirname $(type -p expr))"
AC_SUBST(testPath)

AC_CONFIG_FILES([
  Makefile
  doc/Makefile
  doc/manual/Makefile
  src/Makefile
  src/c/Makefile
  src/sql/Makefile
  src/xsl/Makefile
  src/lib/Makefile
  src/root/Makefile
  src/script/Makefile
  tests/Makefile
  tests/jobs/config.nix
])

AC_OUTPUT
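A typical invocation sketch for this script (the `--with-nix` option is the one defined by `AC_ARG_WITH` above; the prefix shown is illustrative):

```console
$ autoreconf -vfi
$ ./configure --with-nix=/usr/local
$ make
```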
@@ -1,21 +0,0 @@
# Hydra status timeboard

In order to deploy the hydra status dashboard you can:

* create a deployment

```
nixops create -d hydra-status /path/to/hydra/datadog/dd-dashboard.nix
```

* setup the default hostname and api/app keys

```
nixops set-args -d hydra-status --argstr appKey <app_key> --argstr apiKey <api_key> --argstr host chef
```

* deploy

```
nixops deploy -d hydra-status
```
161 datadog/dd-dashboard.nix
@@ -1,161 +0,0 @@
{ host
, appKey
, apiKey
, ...
}:
{
  resources.datadogTimeboards.hydra-status = {
    inherit appKey apiKey;
    description = "Hydra build farm status";
    graphs = [
      {
        definition = builtins.toJSON {
          requests = [
            { q = "avg:hydra.queue.steps.active{$host}"; }
            { q = "avg:hydra.queue.steps.building{$host}"; }
            { q = "avg:hydra.queue.steps.copying_to{$host}"; }
            { q = "avg:hydra.queue.steps.copying_from{$host}"; }
            { q = "avg:hydra.queue.steps.waiting{$host}"; }
          ];
          viz = "timeseries";
        };
        title = "Active/building steps";
      }
      {
        definition = builtins.toJSON {
          requests = [
            { q = "avg:hydra.queue.steps.avg_build_time{$host}"; }
            { q = "avg:hydra.queue.steps.avg_total_time{$host}"; }
          ];
          viz = "timeseries";
        };
        title = "Build/total time per step";
      }
      {
        definition = builtins.toJSON {
          requests = [
            { q = "avg:hydra.queue.steps.finished{$host}"; }
            { q = "avg:hydra.queue.builds.finished{$host}"; }
          ];
          viz = "timeseries";
        };
        title = "Finished builds/steps";
      }
      {
        definition = builtins.toJSON {
          requests = [
            { q = "max:system.io.await{$host} by {device}"; type = "area"; }
          ];
          viz = "timeseries";
        };
        title = "Disk latency (ms, by device)";
      }
      {
        definition = builtins.toJSON {
          requests = [
            { q = "avg:hydra.queue.steps.unfinished{$host}"; }
            { q = "avg:hydra.queue.builds.unfinished{$host}"; }
            { q = "avg:hydra.queue.steps.runnable{$host}"; }
          ];
          viz = "timeseries";
        };
        title = "Unfinished builds/steps";
      }
      {
        definition = builtins.toJSON {
          requests = [
            { q = "avg:system.load.1{$host}"; }
            { q = "avg:system.load.5{$host}"; }
            { q = "avg:system.load.15{$host}"; }
          ];
          viz = "timeseries";
        };
        title = "Load Averages 1-5-15";
      }
      {
        definition = builtins.toJSON {
          requests = [
            { q = "per_hour(ewma_20(avg:hydra.queue.steps.finished{$host}))"; }
            {
              q = "per_hour(ewma_20(avg:hydra.queue.builds.finished{$host}))";
            }
          ];
          viz = "timeseries";
        };
        title = "Finished builds/steps / hour";
      }
      {
        definition = builtins.toJSON {
          requests = [ { q = "avg:hydra.mem.dirty{$host}"; } ];
          viz = "timeseries";
        };
        title = "Dirty memory";
      }
      {
        definition = builtins.toJSON {
          requests = [
            {
              aggregator = "avg";
              conditional_formats = [];
              q = "avg:system.mem.used{$host}";
              type = "line";
            }
            {
              conditional_formats = [];
              q = "avg:system.mem.free{$host}";
              type = "line";
            }
            {
              conditional_formats = [];
              q = "avg:system.mem.usable{$host}";
              type = "line";
            }
          ];
          viz = "timeseries";
        };
        title = "Memory usage";
      }
      {
        definition = builtins.toJSON {
          requests = [
            { q = "avg:hydra.queue.bytes_sent{$host}"; type = "line"; }
            { q = "avg:hydra.queue.bytes_received{$host}"; type = "line"; }
          ];
          viz = "timeseries";
        };
        title = "Store paths sent/received";
      }
      {
        definition = builtins.toJSON {
          requests = [
            { q = "per_minute(ewma_20(avg:hydra.queue.bytes_sent{$host}))"; }
            {
              q = "per_minute(ewma_20(avg:hydra.queue.bytes_received{$host}))";
            }
          ];
          viz = "timeseries";
        };
        title = "Store paths sent/received (GiB / minute)";
      }
      {
        definition = builtins.toJSON {
          requests = [
            { q = "avg:hydra.queue.machines.total{$host}"; type = "line"; }
            { q = "avg:hydra.queue.machines.in_use{$host}"; type = "line"; }
          ];
          viz = "timeseries";
        };
        title = "Total and active machines";
      }
    ];
    templateVariables = [
      {
        default = "host:${host}";
        name = "host";
        prefix = "host";
      }
    ];
    title = "Hydra Status (deployed from nixops)";
  };
}
6 default.nix
@@ -1,6 +0,0 @@
# The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and
# returns an attribute set of the shape `{ defaultNix, shellNix }`

(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
  src = ./.;
}).defaultNix
38 deps.nix Normal file
@@ -0,0 +1,38 @@
{ pkgs }:

with pkgs;

[ perlPackages.CatalystAuthenticationStoreDBIxClass
  perlPackages.CatalystPluginAccessLog
  perlPackages.CatalystPluginAuthorizationRoles
  perlPackages.CatalystPluginSessionStateCookie
  perlPackages.CatalystPluginSessionStoreFastMmap
  perlPackages.CatalystPluginStackTrace
  perlPackages.CatalystViewDownload
  perlPackages.CatalystViewJSON
  perlPackages.CatalystViewTT
  perlPackages.CatalystXScriptServerStarman
  perlPackages.CryptRandPasswd
  perlPackages.DBDPg
  perlPackages.DBDSQLite
  perlPackages.DataDump
  perlPackages.DateTime
  perlPackages.DigestSHA1
  perlPackages.EmailSender
  perlPackages.FileSlurp
  perlPackages.IOCompress
  perlPackages.IPCRun
  perlPackages.JSONXS
  perlPackages.NetTwitterLite
  perlPackages.PadWalker
  perlPackages.CatalystDevel
  perlPackages.Readonly
  perlPackages.SQLSplitStatement
  perlPackages.Starman
  perlPackages.SysHostnameLong
  perlPackages.TestMore
  perlPackages.TextDiff
  perlPackages.TextTable
  perlPackages.XMLSimple
  nixUnstable
]
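A usage sketch (the dev notes later in this diff show the same idea with an explicit profile path; `<nixpkgs>` here stands in for your nixpkgs checkout):

```console
$ nix-env -f deps.nix -i '*' --arg pkgs 'import <nixpkgs> {}'
```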
4 doc/Makefile.am Normal file
@@ -0,0 +1,4 @@
SUBDIRS = manual
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)
@@ -1,129 +0,0 @@
This is a rough overview from informal discussions and explanations of inner workings of Hydra.
You can use it as a guide to navigate the codebase or ask questions.

## Architecture

### Components

- Postgres database
    - configuration
    - build queue
        - what is already built
        - what is going to be built
- `hydra-server`
    - Perl, Catalyst
    - web frontend
- `hydra-evaluator`
    - Perl, C++
    - fetches repositories
    - evaluates job sets
        - pointers to a repository
    - adds builds to the queue
- `hydra-queue-runner`
    - C++
    - monitors the queue
    - executes build steps
    - uploads build results
        - copy to a Nix store
- Nix store
    - contains `.drv`s
    - populated by `hydra-evaluator`
    - read by `hydra-queue-runner`
- destination Nix store
    - can be a binary cache
    - e.g. [cache.nixos.org](http://cache.nixos.org) or the same store again (for small Hydra instances)
- plugin architecture
    - extend evaluator for new kinds of repositories
    - e.g. fetch from `git`

### Database Schema

[https://github.com/NixOS/hydra/blob/master/src/sql/hydra.sql](https://github.com/NixOS/hydra/blob/master/src/sql/hydra.sql)

(The relationships between these tables are illustrated by the query sketch after this list.)

- `Jobsets`
    - populated by calling Nix evaluator
    - every Nix derivation in `release.nix` is a Job
    - `flake`
        - URL to flake, if job is from a flake
        - single point of configuration for flake builds
        - flake itself contains pointers to dependencies
        - for other builds we need more configuration data
- `JobsetInputs`
    - more configuration for a Job
- `JobsetInputAlts`
    - historical, where you could have more than one alternative for each input
    - it would have done the cross product of all possibilities
    - not used any more, as now every input is unique
    - originally that was to have alternative values for the system parameter
        - `x86-linux`, `x86_64-darwin`
        - turned out not to be a good idea, as job set names did not uniquely identify output
- `Builds`
    - queue: scheduled and finished builds
    - instance of a Job
    - corresponds to a top-level derivation
        - can have many dependencies that don't have a corresponding build
        - dependencies represented as `BuildSteps`
    - a Job is all the builds with a particular name, e.g.
        - `git.x86_64-linux` is a job
        - there may be multiple builds for that job
    - build ID: just an auto-increment number
    - building one thing can actually cause many (hundreds of) derivations to be built
    - for queued builds, the `drv` has to be present in the store
        - otherwise the build will fail, e.g. after garbage collection
- `BuildSteps`
    - corresponds to a derivation or substitution
    - are reused through the Nix store
    - may be duplicated for unique derivations due to how they relate to `Jobs`
- `BuildStepOutputs`
    - corresponds directly to derivation outputs
        - `out`, `dev`, ...
- `BuildProducts`
    - not a Nix concept
    - populated from a special file `$out/nix-support/hydra-build-products`
    - used to scrape parts of build results out to the web frontend
        - e.g. manuals, ISO images, etc.
- `BuildMetrics`
    - scrapes data from a magic location, similar to `BuildProducts`, to show fancy graphs
        - e.g. test coverage, build times, CPU utilization for build
    - `$out/nix-support/hydra-metrics`
- `BuildInputs`
    - probably obsolete
- `JobsetEvalMembers`
    - joins evaluations with jobs
    - huge table, 10k's of entries for one `nixpkgs` evaluation
    - can be imagined as a subset of the eval cache
- could in principle use the eval cache
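As a sketch of how these tables relate, the following query lists the builds belonging to one evaluation (the eval ID is the example one used in the dev notes further down; the column names are the ones those notes query):

```console
$ psql hydra -c "select b.id, b.job, b.finished, b.buildstatus
                 from builds b join jobsetevalmembers m on m.build = b.id
                 where m.eval = 820909 order by b.id desc limit 10;"
```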

### `release.nix`

- hydra-specific convention to describe the build
- should evaluate to an attribute set that contains derivations
- hydra considers every attribute in that set a job
- every job needs a unique name
    - if you want to build for multiple platforms, you need to reflect that in the name
- hydra does a deep traversal of the attribute set
    - just evaluating the names may take half an hour

## FAQ

Can we imagine Hydra to be a persistence layer for the build graph?

- partially, it lacks a lot of information
    - does not keep edges of the build graph

How does Hydra relate to `nix build`?

- reimplements the top level Nix build loop, scheduling, etc.
- Hydra has to persist build results
- Hydra has more sophisticated remote build execution and scheduling than Nix

Is it conceptually possible to unify Hydra's capabilities with regular Nix?

- Nix does not have any scheduling, it just traverses the build graph
- Hydra has scheduling in terms of job set priorities, tracks how much of a job set it has worked on
    - makes sure jobs don't starve each other
- Nix cannot dynamically add build jobs at runtime
    - [RFC 92](https://github.com/NixOS/rfcs/blob/master/rfcs/0092-plan-dynamism.md) should enable that
    - internally it is already possible, but there is no interface to do that
- Hydra queue runner is a long running process
    - Nix takes a static set of jobs, working it off at once
@@ -8,22 +8,39 @@

* Setting the maximum number of concurrent builds per system type:

    $ psql -d hydra <<< "insert into SystemTypes(system, maxConcurrent) values('i686-linux', 3);"
    $ sqlite3 hydra.sqlite "insert into SystemTypes(system, maxConcurrent) values('i686-linux', 3);"

* Creating a user:

    $ hydra-create-user root --email-address 'e.dolstra@tudelft.nl' \
        --password-prompt
    $ sqlite3 hydra.sqlite "insert into Users(userName, emailAddress, password) values('root', 'e.dolstra@tudelft.nl', '$(echo -n foobar | sha1sum | cut -c1-40)');"

  (Replace "foobar" with the desired password.)

  To make the user an admin:

    $ hydra-create-user root --role admin
    $ sqlite3 hydra.sqlite "insert into UserRoles(userName, role) values('root', 'admin');"

  To enable a non-admin user to create projects:

    $ sqlite3 hydra.sqlite "insert into UserRoles(userName, role) values('alice', 'create-projects');"

* Creating a release set:

    $ hydra-create-user root --role create-projects
    insert into ReleaseSets(project, name) values('patchelf', 'unstable');
    insert into ReleaseSetJobs(isPrimary, project, release, job, attrs, description) values(1, 'patchelf', 'unstable', 'tarball', 'officialRelease=false', 'Source distribution');
    insert into ReleaseSetJobs(project, release, job, attrs, description) values('patchelf', 'unstable', 'build', 'system=i686-linux', 'Build on i686-linux');
    insert into ReleaseSetJobs(project, release, job, attrs, description) values('patchelf', 'unstable', 'build', 'system=x86_64-linux', 'Build on x86_64-linux');
    insert into ReleaseSetJobs(project, release, job, attrs, description, mayFail) values('patchelf', 'unstable', 'rpm_fedora9i386', '', 'Fedora 9 (i386)', 1);
    insert into ReleaseSetJobs(project, release, job, attrs, description) values('patchelf', 'unstable', 'rpm_fedora10i386', '', 'Fedora 10 (i386)');
    insert into ReleaseSetJobs(project, release, job, attrs, description) values('patchelf', 'unstable', 'deb_ubuntu804i386', '', 'Ubuntu 8.04 (i386)');

    insert into ReleaseSets(project, name) values('patchelf', 'stable');
    insert into ReleaseSetJobs(isPrimary, project, release, job, attrs, description) values(1, 'patchelf', 'stable', 'tarball', 'officialRelease=true', 'Source distribution');
    insert into ReleaseSetJobs(project, release, job, attrs, description) values('patchelf', 'stable', 'build', 'system=i686-linux', 'Build on i686-linux');
    insert into ReleaseSetJobs(project, release, job, attrs, description) values('patchelf', 'stable', 'build', 'system=x86_64-linux', 'Build on x86_64-linux');
    insert into ReleaseSetJobs(project, release, job, attrs, description, mayFail) values('patchelf', 'stable', 'rpm_fedora9i386', '', 'Fedora 9 (i386)', 1);
    insert into ReleaseSetJobs(project, release, job, attrs, description) values('patchelf', 'stable', 'rpm_fedora10i386', '', 'Fedora 10 (i386)');
    insert into ReleaseSetJobs(project, release, job, attrs, description) values('patchelf', 'stable', 'deb_ubuntu804i386', '', 'Ubuntu 8.04 (i386)');

* Changing the priority of a scheduled build:
@@ -53,6 +70,48 @@

  - Start hydra_evaluator and hydra_queue_runner


* Upgrade notes:

    alter table builds add column longDescription text;
    alter table builds add column license text;
    alter table projects add column homepage text;
    alter table builds add column homepage text;
    alter table BuildProducts add column defaultPath text;
    alter table BuildResultInfo add column failedDepBuild integer;
    alter table BuildResultInfo add column failedDepStepNr integer;
    alter table ReleaseSetJobs add column jobset text not null default "trunk";
    === (DB dump/load needed after Sqlite upgrade) ===
    insert into jobs(project, jobset, name, active) select distinct project, jobset, job, 0 from builds b where not exists (select 1 from jobs where project = b.project and jobset = b.jobset and name = b.job);

    create index IndexBuildInputsByBuild on BuildInputs(build);
    create index IndexBuildInputsByDependency on BuildInputs(dependency);

    create index IndexBuildsByTimestamp on Builds(timestamp);

    alter table jobs add column disabled integer not null default 0;
    alter table builds add column maintainers text;

    # Add the isCurrent column to Builds and use the obsolete Jobs.active to fill it in.
    alter table builds add column isCurrent integer default 0;
    update builds set isCurrent = 1 where id in (select max(id) from builds natural join (select distinct b.project, b.jobset, b.job, b.system from builds b join (select project, jobset, name from jobs where active = 1) j on b.project = j.project and b.jobset = j.jobset and b.job = j.name) b2 group by project, jobset, job, system);

    alter table Jobsets add column enabled integer not null default 1;

    # Releases -> Views.
    alter table ReleaseSets rename to Views;
    alter table ReleaseSetJobs rename to ViewJobs;
    alter table ViewJobs rename column release_ to view_;
    alter table ViewJobs drop column mayFail;
    alter table ViewJobs add column autorelease integer not null default 0;

    alter table Builds add column nixExprInput text;
    alter table Builds add column nixExprPath text;

    # Adding JobsetEvals.
    drop table JobsetInputHashes;
    (add JobsetEvals, JobsetEvalMembers)


* Job selection:

    php-sat:build [system = "i686-linux"]
@@ -61,15 +120,19 @@

    --if system i686-linux --arg build {...}


* Restart all aborted builds in a given evaluation (e.g. 820909):
* Restarting a bunch of failed builds:

    > update builds set finished = 0 where id in (select id from builds where finished = 1 and buildstatus = 3 and exists (select 1 from jobsetevalmembers where eval = 820909 and build = id));
    $ sqlite3 hydra.sqlite "select x.id from builds x join buildresultinfo r on r.id = x.id where project = 'nixpkgs' and jobset = 'stdenv' and exists (select 1 from buildinputs where build = x.id and revision = 14806) and finished = 1 and buildstatus = 3" > ids

    $ for i in $(cat ids); do echo $i; sqlite3 hydra.sqlite "begin transaction; insert into buildschedulinginfo (id, priority, busy, locker) values($i, 100, 0, ''); delete from buildresultinfo where id = $i; update builds set finished = 0 where id = $i; commit transaction;"; done

* Restart all builds in a given evaluation that had a build step time out:
  Or with Postgres:

    > update builds set finished = 0 where id in (select id from builds where finished = 1 and buildstatus != 0 and exists (select 1 from jobsetevalmembers where eval = 926992 and build = id) and exists (select 1 from buildsteps where build = id and status = 7));
  (restarting all aborted builds with ID > 42000)
    $ psql -h buildfarm.st.ewi.tudelft.nl -U hydra hydra -t -c 'select x.id from builds x join buildresultinfo r on r.id = x.id where finished = 1 and buildstatus = 3 and x.id > 42000' > ids

    $ for i in $(cat ids); do echo $i; PGPASSWORD=... psql -h buildfarm.st.ewi.tudelft.nl -U hydra hydra -t -c "begin transaction; insert into buildschedulinginfo (id, priority, busy, locker) values($i, 100, 0, ''); delete from buildresultinfo where id = $i; update builds set finished = 0 where id = $i; commit transaction;"; done


* select * from (select project, jobset, job, system, max(timestamp) timestamp from builds where finished = 1 group by project, jobset, job, system) x join builds y on x.timestamp = y.timestamp and x.project = y.project and x.jobset = y.jobset and x.job = y.job and x.system = y.system;

@@ -78,9 +141,14 @@

* Delete all scheduled builds that are not already building:

    delete from builds where finished = 0 and not exists (select 1 from buildschedulinginfo s where s.id = builds.id and busy != 0);
    delete from builds where finished = 0 and not exists (select 1 from buildschedulinginfo s where s.id = builds.id and busy = 1);


* Installing deps.nix in a profile for testing:

    $ nix-env -p $NIX_USER_PROFILE_DIR/hydra-deps -f deps.nix -i \* --arg pkgs 'import /etc/nixos/nixpkgs {}'


* select x.project, x.jobset, x.job, x.system, x.id, x.timestamp, r.buildstatus, b.id, b.timestamp
    from (select project, jobset, job, system, max(id) as id from Builds where finished = 1 group by project, jobset, job, system) as a_
    natural join Builds x
@@ -91,7 +159,7 @@

    where x.project = c.project and x.jobset = c.jobset and x.job = c.job and x.system = c.system
    and x.id > c.id and r.buildstatus != r2.buildstatus);

* Using PostgreSQL (version 9.2 or newer is required):
* Using PostgreSQL:

    $ HYDRA_DBI="dbi:Pg:dbname=hydra;" hydra-server
@@ -110,13 +178,3 @@

  succeed in the nixpkgs:trunk jobset:

    select job, system from builds b natural join buildresultinfo where project = 'nixpkgs' and jobset = 'stdenv' and iscurrent = 1 and finished = 1 and buildstatus != 0 and exists (select 1 from builds natural join buildresultinfo where project = 'nixpkgs' and jobset = 'trunk' and job = b.job and system = b.system and iscurrent = 1 and finished = 1 and buildstatus = 0) order by job, system;


* Get all Nixpkgs jobs that have never built successfully:

    select project, jobset, job from builds b1
    where project = 'nixpkgs' and jobset = 'trunk' and iscurrent = 1
    group by project, jobset, job
    having not exists
      (select 1 from builds b2 where b1.project = b2.project and b1.jobset = b2.jobset and b1.job = b2.job and finished = 1 and buildstatus = 0)
    order by project, jobset, job;
53 doc/manual/Makefile.am Normal file
@@ -0,0 +1,53 @@
DOCBOOK_FILES = installation.xml introduction.xml manual.xml projects.xml

EXTRA_DIST = $(DOCBOOK_FILES)

xsltproc_opts = \
	--param html.stylesheet \'style.css\' \
	--param callout.graphics.extension \'.gif\'

dblatex_opts = \
	-V \
	-P doc.collab.show=0 \
	-P latex.output.revhistory=0

# Include the manual in the tarball.
dist_html_DATA = manual.html style.css
dist_pdf_DATA = manual.pdf

# Embed Docbook's callout images in the distribution.
EXTRA_DIST += images

manual.html: $(DOCBOOK_FILES)
	if test "$(XSLTPROC)" != ""; then \
	  $(XSLTPROC) $(xsltproc_opts) --nonet --xinclude \
	    --output manual.html \
	    $(docbookxsl)/html/docbook.xsl manual.xml; \
	else \
	  echo "Please install xsltproc and rerun configure."; \
	  exit 1; \
	fi

manual.pdf: $(DOCBOOK_FILES)
	if test "$(DBLATEX)" != ""; then \
	  $(DBLATEX) $(dblatex_opts) manual.xml; \
	else \
	  echo "Please install dblatex and rerun configure."; \
	  exit 1; \
	fi

images:
	$(MKDIR_P) images/callouts
	if cp $(docbookxsl)/images/callouts/*.gif images/callouts; then \
	  chmod +wx images images/callouts; \
	else \
	  echo "Please install Docbook XSL and try again."; \
	  exit 1; \
	fi

install-data-hook: images
	$(INSTALL) -d $(DESTDIR)$(htmldir)/images/callouts
	$(INSTALL_DATA) images/callouts/* $(DESTDIR)$(htmldir)/images/callouts

distclean-hook:
	-rm -rf images
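These targets can be invoked directly once configure has located xsltproc and dblatex (a sketch):

```console
$ make -C doc/manual manual.html manual.pdf
```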
238 doc/manual/installation.xml Normal file
@@ -0,0 +1,238 @@
<chapter xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xml:id="chap-installation">

<title>Installation</title>

<para>
  This chapter explains how to install Hydra on your own build farm server.
</para>

<section>
  <title>Prerequisites</title>
  <para>
    To install and use Hydra you need to have installed the following dependencies:

    <itemizedlist>
      <listitem>Nix</listitem>
      <listitem>either PostgreSQL or SQLite</listitem>
      <listitem>many Perl packages, notably Catalyst, EmailSender,
        and NixPerl (see the <link
        xlink:href="https://svn.nixos.org/repos/nix/nixpkgs/trunk/pkgs/development/tools/misc/hydra/default.nix">Hydra
        expression in Nixpkgs</link> for the complete
        list).</listitem>
    </itemizedlist>

    At the moment, Hydra runs only on GNU/Linux
    (<emphasis>i686-linux</emphasis> and
    <emphasis>x86_64-linux</emphasis>).
  </para>

  <para>
    For small projects, Hydra can be run on any reasonably modern
    machine. For individual projects you can even run Hydra on a
    laptop. However, the charm of a buildfarm server is usually that
    it operates without disturbing the developer's working
    environment and can serve releases over the internet. You should
    typically also have your source code administered in a version
    management system, such as Subversion. Therefore, you will
    probably want to install a server that is connected to the
    internet. To scale up to large and/or many projects, you will
    need at least a considerable amount of disk space to store
    builds. Since Hydra can schedule multiple simultaneous build
    jobs, it can be useful to have a multi-core machine, and/or
    attach multiple build machines in a network to the central Hydra
    server.
  </para>

  <para>
    Of course we think it is a good idea to use the <link
    xlink:href="http://nixos.org/nixos">NixOS</link> GNU/Linux
    distribution for your buildfarm server. But this is not a
    requirement. The Nix software deployment system can be installed
    on any GNU/Linux distribution in parallel to the regular package
    management system. Thus, you can use Hydra on a Debian, Fedora,
    SuSE, or Ubuntu system.
  </para>

</section>

<section>
  <title>Getting Nix</title>

  <para>
    If your server runs NixOS you are all set to continue with
    installation of Hydra. Otherwise you first need to install Nix.
    The latest stable version can be found on <link
    xlink:href="http://nixos.org/nix/download.html">the Nix web
    site</link>, along with a manual, which includes installation
    instructions.
  </para>
</section>

<section>
  <title>Installation</title>

  <!--
  <para>
    Hydra can be installed using Nixpkgs:

<screen>
nix-env -f /path/to/nixpkgs -iA hydra</screen>

    This makes the tools available in your Nix user environment,
    <literal>$HOME/.nix-profile</literal> by default.
  </para>
  -->

  <para>
    The latest development snapshot of Hydra can be installed
    by visiting the URL <link
    xlink:href="http://hydra.nixos.org/view/hydra/unstable"><literal>http://hydra.nixos.org/view/hydra/unstable</literal></link>
    and using the one-click install available at one of the build
    pages. You can also install Hydra through the channel by
    performing the following commands:

<screen>
nix-channel --add http://hydra.nixos.org/jobset/hydra/trunk/channel/latest
nix-channel --update
nix-env -i hydra</screen>
  </para>

  <para>
    Command completion should reveal a number of command-line tools from Hydra:

<screen>
hydra-build       hydra-init          hydra-update-gc-roots
hydra-eval-jobs   hydra-queue-runner
hydra-evaluator   hydra-server
</screen>
  </para>
</section>

<section>
  <title>Creating the database</title>
  <para>
    Hydra stores its results in a database, which can be a
    PostgreSQL or SQLite database. The latter is easier to set up,
    but the former scales better.
  </para>

  <para>
    To set up a PostgreSQL database with <emphasis>hydra</emphasis>
    as database name and user name, issue the following commands on
    the PostgreSQL server:

<screen>
createuser -S -D -R -P hydra
createdb -O hydra hydra</screen>

    Note that <emphasis>$prefix</emphasis> is the location of Hydra
    in the nix store.
  </para>

  <para>
    Hydra uses an environment variable to know which database should
    be used, and a variable which points to a location that holds
    some state. To set these variables for a PostgreSQL database,
    add the following to the file <filename>~/.profile</filename> of
    the user running the Hydra services.

<screen>
export HYDRA_DBI="dbi:Pg:dbname=hydra;host=dbserver.example.org;user=hydra;"
export HYDRA_DATA=/var/lib/hydra</screen>

    You can provide the username and password in the file
    <filename>~/.pgpass</filename>, e.g.

<screen>
dbserver.example.org:*:hydra:hydra:password</screen>

    Make sure that the <emphasis>HYDRA_DATA</emphasis> directory
    exists and is writable for the user which will run the Hydra
    services. For a SQLite database,
    <varname>HYDRA_DBI</varname> should be set to something like
    <literal>dbi:SQLite:/path/to/hydra.sqlite</literal>.
  </para>

  <para>
    Having set these environment variables, you can now initialise
    the database by doing:
<screen>
hydra-init</screen>
  </para>

  <para>
    To add a user <emphasis>root</emphasis> with
    <emphasis>admin</emphasis> privileges, execute:
<screen>
echo "INSERT INTO Users(userName, emailAddress, password) VALUES ('root', 'some@email.adress.com', '$(echo -n foobar | sha1sum | cut -c1-40)');" | psql hydra
echo "INSERT INTO UserRoles(userName, role) values('root', 'admin');" | psql hydra</screen>

    For SQLite the same commands can be used, with <command>psql
    hydra</command> replaced by <command>sqlite3
    /path/to/hydra.sqlite</command>.
  </para>

</section>

<section>
  <title>Upgrading</title>

  <para>If you're upgrading Hydra from a previous version, you
  should do the following to perform any necessary database schema migrations:
<screen>
hydra-init</screen>
  </para>

</section>

<section>
  <title>Getting Started</title>

  <para>
    To start the Hydra web server, execute:
<screen>
hydra-server</screen>

    When the server is started, you can browse to
    <link xlink:href="http://localhost:3000/">http://localhost:3000/</link> to start configuring
    your Hydra instance.
  </para>

  <para>
    The <command>hydra-server</command> command launches the web
    server. There are two other processes that come into play:

    <itemizedlist>
      <listitem>
        The <emphasis>evaluator</emphasis> is responsible for
        periodically evaluating job sets, checking their
        dependencies out of their version control systems (VCS), and
        queueing new builds if the result of the evaluation changed.
        It is launched by the <command>hydra-evaluator</command>
        command.
      </listitem>
      <listitem>
        The <emphasis>queue runner</emphasis> launches builds (using
        Nix) as they are queued by the evaluator, scheduling them
        onto the configured Nix hosts. It is launched using the
        <command>hydra-queue-runner</command> command.
      </listitem>
    </itemizedlist>

    All three processes must be running for Hydra to be fully
    functional, though it's possible to temporarily stop any one of
    them for maintenance purposes, for instance.
  </para>
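  <para>
    For instance, a minimal sketch of starting all three processes by
    hand (under the NixOS module they run as system services instead;
    the commands are the ones named above):
<screen>
hydra-server &amp;
hydra-evaluator &amp;
hydra-queue-runner &amp;</screen>
  </para>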

</section>

</chapter>

<!--
Local Variables:
indent-tabs-mode: nil
ispell-local-dictionary: "american"
End:
-->
267 doc/manual/introduction.xml Normal file
@@ -0,0 +1,267 @@
|
||||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xml:id="chap-introduction">
|
||||
|
||||
<title>Introduction</title>
|
||||
|
||||
<section>
|
||||
<title>About Hydra</title>
|
||||
|
||||
<para>
|
||||
Hydra is a tool for continuous integration testing and software
|
||||
release that uses a purely functional language to describe build jobs
|
||||
and their dependencies. Continuous integration is a simple technique
|
||||
to improve the quality of the software development process. An
|
||||
automated system continuously or periodically checks out the source
|
||||
code of a project, builds it, runs tests, and produces reports for the
|
||||
developers. Thus, various errors that might accidentally be committed
|
||||
into the code base are automatically caught. Such a system allows
|
||||
more in-depth testing than what developers could feasibly do manually:
|
||||
|
||||
<itemizedlist>
|
||||
<listitem> <emphasis>Portability testing</emphasis>: The
|
||||
software may need to be built and tested on many different
|
||||
platforms. It is infeasible for each developer to do this
|
||||
before every commit.
|
||||
</listitem>
|
||||
|
||||
<listitem> Likewise, many projects have very large test sets
|
||||
(e.g., regression tests in a compiler, or stress tests in a
|
||||
DBMS) that can take hours or days to run to completion.
|
||||
</listitem>
|
||||
|
||||
<listitem> Many kinds of static and dynamic analyses can be
|
||||
performed as part of the tests, such as code coverage runs and
|
||||
static analyses.
|
||||
</listitem>
|
||||
|
||||
<listitem> It may also be necessary to build many different
|
||||
<emphasis>variants</emphasis> of the software. For instance,
|
||||
it may be necessary to verify that the component builds with
|
||||
various versions of a compiler.
|
||||
</listitem>
|
||||
|
||||
<listitem> Developers typically use incremental building to
|
||||
test their changes (since a full build may take too long), but
|
||||
this is unreliable with many build management tools (such as
|
||||
Make), i.e., the result of the incremental build might differ
|
||||
from a full build.
|
||||
</listitem>
|
||||
|
||||
<listitem> It ensures that the software can be built from the
|
||||
sources under revision control. Users of version management
|
||||
systems such as CVS and Subversion often forget to place
|
||||
source files under revision control.
|
||||
</listitem>
|
||||
|
||||
<listitem> The machines on which the continuous integration
|
||||
system runs ideally provides a clean, well-defined build
|
||||
environment. If this environment is administered through
|
||||
proper SCM techniques, then builds produced by the system can
|
||||
be reproduced. In contrast, developer work environments are
|
||||
typically not under any kind of SCM control.
|
||||
</listitem>
|
||||
|
||||
<listitem> In large projects, developers often work on a
|
||||
particular component of the project, and do not build and test
|
||||
the composition of those components (again since this is
|
||||
likely to take too long). To prevent the phenomenon of ``big
|
||||
bang integration'', where components are only tested together
|
||||
near the end of the development process, it is important to
|
||||
test components together as soon as possible (hence
|
||||
<emphasis>continuous integration</emphasis>).
|
||||
</listitem>
|
||||
|
||||
<listitem> It allows software to be
|
||||
<emphasis>released</emphasis> by automatically creating
|
||||
packages that users can download and install. To do this
|
||||
manually represents an often prohibitive amount of work, as
|
||||
one may want to produce releases for many different platforms:
|
||||
e.g., installers for Windows and Mac OS X, RPM or Debian
|
||||
packages for certain Linux distributions, and so on.
|
||||
</listitem>
|
||||
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
In its simplest form, a continuous integration tool sits in a
|
||||
loop building and releasing software components from a version
|
||||
management system. For each component, it performs the
|
||||
following tasks:
|
||||
|
||||
<itemizedlist>
|
||||
|
||||
<listitem>It obtains the latest version of the component's
|
||||
source code from the version management system.
|
||||
</listitem>
|
||||
|
||||
<listitem> It runs the component's build process (which
|
||||
presumably includes the execution of the component's test
|
||||
set).
|
||||
</listitem>
|
||||
|
||||
<listitem> It presents the results of the build (such as error
|
||||
logs and releases) to the developers, e.g., by producing a web
|
||||
page.
|
||||
</listitem>
|
||||
|
||||
</itemizedlist>
|
||||
|
||||
Examples of continuous integration tools include Jenkins,
|
||||
CruiseControl Tinderbox, Sisyphus, Anthill and BuildBot. These
|
||||
tools have various limitations.
|
||||
|
||||
<itemizedlist>
|
||||
|
||||
<listitem> They do not manage the <emphasis>build
|
||||
environment</emphasis>. The build environment consists of the
|
||||
dependencies necessary to perform a build action, e.g.,
|
||||
compilers, libraries, etc. Setting up the environment is
|
||||
typically done manually, and without proper SCM control (so it
|
||||
may be hard to reproduce a build at a later time). Manual
|
||||
management of the environment scales poorly in the number of
|
||||
configurations that must be supported. For instance, suppose
|
||||
that we want to build a component that requires a certain
|
||||
compiler X. We then have to go to each machine and install X.
|
||||
If we later need a newer version of X, the process must be
|
||||
repeated all over again. An ever worse problem occurs if
|
||||
there are conflicting, mutually exclusive versions of the
|
||||
dependencies. Thus, simply installing the latest version is
|
||||
not an option. Of course, we can install these components in
|
||||
different directories and manually pass the appropriate paths
|
||||
to the build processes of the various components. But this is
|
||||
a rather tiresome and error-prone process.
|
||||
</listitem>
|
||||
|
||||
<listitem> They do not easily support <emphasis>variability in software
systems</emphasis>. A system may have a great deal of build-time
variability: optional functionality, whether to build a debug or
production version, different versions of dependencies, and so on.
(For instance, the Linux kernel now has over 2,600 build-time
configuration switches.) It is therefore important that a continuous
integration tool can easily select and test different instances from
the configuration space of the system to reveal problems, such as
erroneous interactions between features. In a continuous integration
setting, it is also useful to test different combinations of versions
of subsystems, e.g., the head revision of a component against stable
releases of its dependencies, and vice versa, as this can reveal
various integration problems.
</listitem>

</itemizedlist>
</para>
<para>
<emphasis>Hydra</emphasis> is a continuous integration tool
that solves these problems. It is built on top of the <link
xlink:href="http://nixos.org/nix/">Nix package manager</link>,
which has a purely functional language for describing package
build actions and their dependencies. This allows the build
environment for projects to be produced automatically and
deterministically, and variability in components to be expressed
naturally using functions; as such, it is an ideal fit for a
continuous build system.
</para>

</section>
<section>
<title>About Us</title>

<para>
Hydra is the successor of the Nix Buildfarm, which was developed
in tandem with the Nix software deployment system. Nix was
originally developed at the Department of Information and
Computing Sciences, Utrecht University by the TraCE project
(2003-2008). The project was funded by the Software Engineering
Research Program Jacquard to improve the support for variability
in software systems. Funding for the development of Nix and
Hydra is now provided by the NIRICT LaQuSo Build Farm project.
</para>
</section>
<section>
<title>About this Manual</title>

<para>
This manual tells you how to install the Hydra buildfarm
software on your own server and how to operate that server using
its web interface.
</para>
</section>
<section>
<title>License</title>

<para>
Hydra is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
</para>

<para>
Hydra is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
<link xlink:href="http://www.gnu.org/licenses/">GNU General
Public License</link> for more details.
</para>
</section>
<section>
<title>Hydra at <literal>nixos.org</literal></title>

<para>
The <literal>nixos.org</literal> installation of Hydra runs at
<link
xlink:href="http://hydra.nixos.org/"><literal>http://hydra.nixos.org/</literal></link>.

That installation is used to build software components from the
<link xlink:href="http://nixos.org">Nix</link>,
<link xlink:href="http://nixos.org/nixos">NixOS</link>,
<link xlink:href="http://www.gnu.org/">GNU</link>,
<link xlink:href="http://strategoxt.org">Stratego/XT</link>,
and related projects.
</para>

<para>
If you are one of the developers on those projects, it is likely
that you will be using the NixOS Hydra server in some way. If
you need to administer automatic builds for your project, you
should pull the right strings to get an account on the
server. This manual will tell you how to set up new projects and
build jobs within those projects, and how to write a
<filename>release.nix</filename> file that describes the build
process of your project to Hydra. You can skip the next chapter.
</para>

<para>
If your project does not yet have automatic builds within the
NixOS Hydra server, it may actually be eligible. We are in the
process of setting up a large buildfarm that should be able to
support open source and academic software projects. Get in
touch.
</para>
</section>
<section>
<title>Hydra on your own buildfarm</title>

<para>
If you need to run your own Hydra installation, <xref
linkend="chap-installation" /> explains how to download and
install the system on your own server.
</para>
</section>
</chapter>

<!--
Local Variables:
indent-tabs-mode: nil
ispell-local-dictionary: "american"
End:
-->
69 doc/manual/manual.xml Normal file
@@ -0,0 +1,69 @@
<book xmlns="http://docbook.org/ns/docbook"
      xmlns:xi="http://www.w3.org/2001/XInclude">

<info>

  <title>Hydra User's Guide</title>

  <subtitle>Draft</subtitle>

  <authorgroup>
    <author>
      <personname>
        <firstname>Eelco</firstname>
        <surname>Dolstra</surname>
      </personname>
      <affiliation>
        <orgname>Delft University of Technology</orgname>
        <orgdiv>Department of Software Technology</orgdiv>
      </affiliation>
      <contrib>Author</contrib>
    </author>
    <author>
      <personname>
        <firstname>Rob</firstname>
        <surname>Vermaas</surname>
      </personname>
      <affiliation>
        <orgname>Delft University of Technology</orgname>
        <orgdiv>Department of Software Technology</orgdiv>
      </affiliation>
      <contrib>Author</contrib>
    </author>
    <author>
      <personname>
        <firstname>Eelco</firstname>
        <surname>Visser</surname>
      </personname>
      <affiliation>
        <orgname>Delft University of Technology</orgname>
        <orgdiv>Department of Software Technology</orgdiv>
      </affiliation>
      <contrib>Author</contrib>
    </author>
    <author>
      <personname>
        <firstname>Ludovic</firstname>
        <surname>Courtès</surname>
      </personname>
      <contrib>Author</contrib>
    </author>
  </authorgroup>

  <copyright>
    <year>2009</year>
    <year>2010</year>
    <holder>Eelco Dolstra</holder>
  </copyright>

  <date>March 2010</date>

</info>

<xi:include href="introduction.xml" />
<xi:include href="installation.xml" />
<xi:include href="projects.xml" />

</book>
@@ -1,36 +0,0 @@

srcs = files(
  'src/SUMMARY.md',
  'src/about.md',
  'src/api.md',
  'src/configuration.md',
  'src/hacking.md',
  'src/installation.md',
  'src/introduction.md',
  'src/jobs.md',
  'src/monitoring/README.md',
  'src/notifications.md',
  'src/plugins/README.md',
  'src/plugins/RunCommand.md',
  'src/plugins/declarative-projects.md',
  'src/projects.md',
  'src/webhooks.md',
)

manual = custom_target(
  'manual',
  command: [
    mdbook,
    'build',
    '@SOURCE_ROOT@/doc/manual',
    '-d', meson.current_build_dir() / 'html'
  ],
  depend_files: srcs,
  output: ['html'],
  build_by_default: true,
)

install_subdir(
  manual.full_path(),
  install_dir: get_option('datadir') / 'doc/hydra',
  strip_directory: true,
)
523 doc/manual/projects.xml Normal file
@@ -0,0 +1,523 @@
<chapter xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xml:id="chap-projects">

<title>Creating and Managing Projects</title>

<para>
Once Hydra is installed and running, the next step is to add
projects to the build farm. We follow the example of the <link
xlink:href="http://nixos.org/patchelf.html">Patchelf
project</link>, a software tool written in C and using the GNU
Build System (GNU Autoconf and GNU Automake).
</para>

<para>
Log in to the web interface of your Hydra installation using the
user name and password you inserted in the database (by default,
Hydra's web server listens on <link
xlink:href="http://localhost:3000/"><literal>localhost:3000</literal></link>).
Then follow the "Create Project" link to create a new project.
</para>
<section>
<title>Project Information</title>

<para>
A project definition consists of some general information and a
set of job sets. The general information identifies a project,
its owner, and current state of activity.

Here's what we fill in for the patchelf project:

<screen>
Identifier: patchelf
</screen>

The <emphasis>identifier</emphasis> is the identity of the
project. It is used in URLs and in the names of build results.
</para>

<para>
The identifier should be a unique name (it is the primary
database key for the project table in the database). If you try
to create a project with an already existing identifier you will
get an error message such as:

<screen>
I'm very sorry, but an error occurred:
DBIx::Class::ResultSet::create(): DBI Exception: DBD::SQLite::st execute failed: column name is not unique(19) at dbdimp.c line 402
</screen>

So try to create the project after entering just the general
information to figure out if you have chosen a unique name.
Job sets can be added once the project has been created.

<screen>
Display name: Patchelf
</screen>

The <emphasis>display name</emphasis> is used in menus.

<screen>
Description: A tool for modifying ELF binaries
</screen>

The <emphasis>description</emphasis> is used as short
documentation of the nature of the project.

<screen>
Owner: eelco
</screen>

The <emphasis>owner</emphasis> of a project can create and edit
job sets.

<screen>
Enabled: Yes
</screen>

Builds are performed only if the project is
<emphasis>enabled</emphasis>.
</para>

<para>
Once created, there should be an entry for the project in the
sidebar. Go to the project page for the <link
xlink:href="http://localhost:3000/project/patchelf">Patchelf</link>
project.
</para>
</section>
<section>
<title>Job Sets</title>

<para>
A project can consist of multiple <emphasis>job sets</emphasis>
(hereafter <emphasis>jobsets</emphasis>), separate tasks that
can be built separately, but may depend on each other (without
cyclic dependencies, of course). Go to the <link
xlink:href="http://localhost:3000/project/patchelf/edit">Edit</link>
page of the Patchelf project and "Add a new jobset" by providing
the following "Information":

<screen>
Identifier: trunk
Description: Trunk
Nix expression: release.nix in input patchelfSrc
</screen>

This states that in order to build the <literal>trunk</literal>
jobset, the Nix expression in the file
<filename>release.nix</filename>, which can be obtained from
input <literal>patchelfSrc</literal>, should be
evaluated. (We'll have a look at
<filename>release.nix</filename> later.)
</para>

<para>
To realize a job we probably need a number of inputs, which can
be declared in the table below. As many inputs as required can
be added. For patchelf we declare the following inputs.

<screen>
patchelfSrc
'Subversion checkout' https://svn.nixos.org/repos/nix/patchelf/trunk

nixpkgs 'Subversion checkout' https://svn.nixos.org/repos/nix/nixpkgs/trunk

officialRelease Boolean false

system String value "i686-linux"
</screen>
</para>
</section>
<section>
<title>Release Set</title>

<!-- TODO -->
there must be one primary job

check the radio button of exactly one job

https://svn.nixos.org/repos/nix/nixpkgs/trunk
</section>

<section>
<title>Building Jobs</title>
</section>
<section>
<title>Build Recipes</title>

<para>
Build jobs and <emphasis>build recipes</emphasis> for a jobset are
specified in a text file written in the <link
xlink:href="http://nixos.org/nix/">Nix language</link>. The
recipe is actually called a <emphasis>Nix expression</emphasis> in
Nix parlance. By convention this file is often called
<filename>release.nix</filename>.
</para>

<para>
The <filename>release.nix</filename> file is typically kept under
version control, and the repository that contains it is one of the
build inputs of the corresponding jobset (that input is often
called <literal>hydraConfig</literal> by convention). The repository
for that file and the actual file name are specified on the web
interface of Hydra under the <literal>Setup</literal> tab of the
jobset's overview page, under the <literal>Nix
expression</literal> heading. See, for example, the <link
xlink:href="http://hydra.nixos.org/jobset/patchelf/trunk">jobset
overview page</link> of the PatchELF project, and <link
xlink:href="https://svn.nixos.org/repos/nix/patchelf/trunk/release.nix">
the corresponding Nix file</link>.
</para>

<para>
Knowledge of the Nix language is recommended, but the example
below should already give a good idea of how it works:
</para>
<example xml:id='ex-hello'>
<title><filename>release.nix</filename> file for GNU Hello</title>
<programlisting>
{ nixpkgs }: <co xml:id='ex-hello-co-nixpkgs' />

let
  pkgs = import nixpkgs {}; <co xml:id='ex-hello-co-import-nixpkgs' />

  jobs = rec { <co xml:id='ex-hello-co-jobs' />

    tarball = <co xml:id='ex-hello-co-tarball' />
      { helloSrc }: <co xml:id='ex-hello-co-tarball-args' />

      pkgs.releaseTools.sourceTarball { <co xml:id='ex-hello-co-source-tarball' />
        name = "hello-tarball";
        src = helloSrc;
        buildInputs = (with pkgs; [ gettext texLive texinfo ]);
      };

    build = <co xml:id='ex-hello-co-build' />
      { tarball ? jobs.tarball {} <co xml:id='ex-hello-co-build-args' />
      , system ? builtins.currentSystem
      }:

      let pkgs = import nixpkgs { inherit system; }; in
      pkgs.releaseTools.nixBuild { <co xml:id='ex-hello-co-nix-build' />
        name = "hello";
        src = tarball;
        configureFlags = [ "--disable-silent-rules" ];
      };
  };
in
jobs <co xml:id='ex-hello-co-body' />
</programlisting>
</example>
<para>
<xref linkend='ex-hello' /> shows what a
<filename>release.nix</filename> file for <link
xlink:href="http://www.gnu.org/software/hello/">GNU Hello</link>
would look like. GNU Hello is representative of many GNU
and non-GNU free software projects:

<itemizedlist>
<listitem>it uses the GNU Build System, namely GNU Autoconf
and GNU Automake; for users, this means it can be installed
using the <link
xlink:href="http://www.gnu.org/prep/standards/html_node/Managing-Releases.html">usual
<literal>./configure && make install</literal>
procedure</link>;
</listitem>
<listitem>it uses Gettext for internationalization;</listitem>
<listitem>it has a Texinfo manual, which can be rendered as PDF
with TeX.</listitem>
</itemizedlist>

The file defines a jobset consisting of two jobs:
<literal>tarball</literal> and <literal>build</literal>. It
contains the following elements (referenced from the figure by
numbers):

<calloutlist>
<callout arearefs='ex-hello-co-nixpkgs'>
<para>
This specifies a function of one named argument,
<varname>nixpkgs</varname>. This function, like the ones
defined below, is called by Hydra. Here the
<varname>nixpkgs</varname> argument is meant to be a
checkout of the <link
xlink:href="http://nixos.org/nixpkgs/">Nixpkgs</link>
software distribution.
</para>
<para>
Hydra inspects the formal argument list of the function
(here, the <varname>nixpkgs</varname> argument) and passes
it the corresponding parameter specified as a build input
on Hydra's web interface. In this case, the web interface
should show a <varname>nixpkgs</varname> build input,
which is a checkout of the Nixpkgs source code repository.
</para>
</callout>
<callout arearefs='ex-hello-co-import-nixpkgs'>
<para>
This defines a variable <varname>pkgs</varname> holding
the set of packages provided by Nixpkgs.
</para>
</callout>

<callout arearefs='ex-hello-co-jobs'>
<para>
This defines a variable holding the two Hydra jobs, an
<emphasis>attribute set</emphasis> in Nix.
</para>
</callout>
<callout arearefs='ex-hello-co-tarball'>
<para>
This is the definition of the first job, named
<varname>tarball</varname>. The purpose of this job is to
produce a usable source code tarball.
</para>
</callout>
<callout arearefs='ex-hello-co-tarball-args'>
<para>
The <varname>tarball</varname> job takes an additional
argument called <varname>helloSrc</varname>. Again, this
argument is passed by Hydra and is meant to be a checkout
of GNU Hello's source code repository.
</para>
</callout>
<callout arearefs='ex-hello-co-source-tarball'>
<para>
The <varname>tarball</varname> job calls the
<varname>sourceTarball</varname> function, which (roughly)
runs <command>autoreconf && ./configure &&
make dist</command> on the checkout. The
<varname>buildInputs</varname> attribute specifies
additional software dependencies for the
job<footnote><para>The package names used in
<varname>buildInputs</varname>, e.g.
<varname>texLive</varname>, are the names of the
<emphasis>attributes</emphasis> corresponding to these
packages in Nixpkgs, specifically in the <link
xlink:href="https://svn.nixos.org/repos/nix/nixpkgs/trunk/pkgs/top-level/all-packages.nix"><filename>all-packages.nix</filename></link>
file. See the section entitled “Package Naming” in the
Nixpkgs manual for more information.</para></footnote>.
</para>
</callout>
<callout arearefs='ex-hello-co-build'>
<para>
This is the definition of the <varname>build</varname>
job, whose purpose is to build Hello from the tarball
produced above.
</para>
</callout>
<callout arearefs='ex-hello-co-build-args'>
<para>
The <varname>build</varname> function takes two additional
parameters: <varname>tarball</varname>, which is meant to
be the result of the <varname>tarball</varname> job, and
<varname>system</varname>, which should be a string
defining the Nix system type, e.g.
<literal>"x86_64-linux"</literal>.
</para>
<para>
Again, these parameters are passed by Hydra when it calls
<varname>build</varname>. Thus, they must be defined as
build inputs in Hydra: <varname>tarball</varname> should
have type <literal>Build Output</literal>, its value being
the latest output of the <varname>tarball</varname> job,
and <varname>system</varname> should be a string.
</para>
<para>
The question mark after <literal>tarball</literal> and
<literal>system</literal> defines default values for these
arguments, and is only useful for debugging.
</para>
</callout>
<callout arearefs='ex-hello-co-nix-build'>
<para>
The <varname>build</varname> job calls the
<varname>nixBuild</varname> function, which unpacks the
tarball, then runs <command>./configure && make
&& make check && make install</command>.
</para>
</callout>

<callout arearefs='ex-hello-co-body'>
<para>
Finally, the set of jobs is returned to Hydra, as a Nix
attribute set.
</para>
</callout>
</calloutlist>
</para>
</section>
<section>
<title>Building from the Command Line</title>

<para>
It is often useful to test a build recipe, for instance before
it is actually used by Hydra, when testing changes, or when
debugging a build issue. Since build recipes for Hydra jobsets
are just plain Nix expressions, they can be evaluated using the
standard Nix tools.
</para>

<para>
To evaluate the <varname>tarball</varname> job of <xref
linkend='ex-hello' />, just run:

<screen>
$ nix-build release.nix -A tarball
</screen>

However, doing this with <xref linkend='ex-hello' /> as is will
probably yield an error like this:

<screen>
error: cannot auto-call a function that has an argument without a default value (`nixpkgs')
</screen>

This is because no value was specified for the
<varname>nixpkgs</varname> argument of the Nix expression.
</para>
<para>
This is fixed by providing a default value for that argument in
the Nix expression, which will allow <command>nix-build</command>
to auto-call the function: instead of writing <literal>{ nixpkgs
}:</literal>, we now write <literal>{ nixpkgs ? <nixpkgs>
}:</literal>. This means that, by default, the
<varname>nixpkgs</varname> variable will be bound to the absolute
path of any <filename>nixpkgs</filename> file found in the Nix
search path. Similarly, a default value for
<varname>helloSrc</varname> needs to be provided.
</para>
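
<para>
Concretely, the two function headers could then read as follows
(a sketch; the default for <varname>helloSrc</varname> is
hypothetical and assumes a local checkout of GNU Hello in
<filename>./hello</filename>):

<programlisting>
{ nixpkgs ? <nixpkgs> }:        # top-level function, now auto-callable

...
    tarball =
      { helloSrc ? ./hello }:   # hypothetical default: local checkout
...
</programlisting>
</para>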
<para>
Thus, assuming a checkout of Nixpkgs is available under
<filename>$HOME/src/nixpkgs</filename>, the
<varname>tarball</varname> job can now be evaluated by running:

<screen>
$ nix-build -I ~/src release.nix -A tarball
</screen>

Similarly, the <varname>build</varname> job can be evaluated:

<screen>
$ nix-build -I ~/src release.nix -A build
</screen>

The <varname>build</varname> job reuses the result of the
<varname>tarball</varname> job, rebuilding it only if it needs to.
</para>
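
<para>
Arguments can also be passed explicitly on the command line
instead of relying on such defaults; this is a sketch using the
standard <command>nix-build</command> options:

<screen>
$ nix-build -I ~/src release.nix -A build --argstr system x86_64-linux
</screen>
</para>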
</section>

<section>
<title>Adding More Jobs</title>

<para>
<xref linkend='ex-hello' /> illustrates how to write the most
basic jobs, <varname>tarball</varname> and
<varname>build</varname>. In practice, much more can be done by
using features readily provided by Nixpkgs or by creating new jobs
as customizations of existing jobs.
</para>

<para>
For instance, test coverage reports for projects compiled with GCC
can be automatically generated using the
<varname>coverageAnalysis</varname> function provided by Nixpkgs
instead of <varname>nixBuild</varname>. Back to our GNU Hello
example, we can define a <varname>coverage</varname> job that
produces an HTML code coverage report directly readable from the
corresponding Hydra build page:

<programlisting>
coverage =
  { tarball ? jobs.tarball {}
  , system ? builtins.currentSystem
  }:

  let pkgs = import nixpkgs { inherit system; }; in
  pkgs.releaseTools.coverageAnalysis {
    name = "hello";
    src = tarball;
    configureFlags = [ "--disable-silent-rules" ];
  };
</programlisting>

As can be seen, the only difference compared to
<varname>build</varname> is the use of
<varname>coverageAnalysis</varname>.
</para>
<para>
Nixpkgs provides many more build tools, including the ability to
run builds in virtual machines that can themselves run another
GNU/Linux distribution, which allows for the creation of packages
for these distributions. Please see <link
xlink:href="https://svn.nixos.org/repos/nix/nixpkgs/trunk/pkgs/build-support/release/">the
<filename>pkgs/build-support/release</filename> directory</link>
of Nixpkgs for more. The NixOS manual also contains information
about whole-system testing in virtual machines.
</para>
<para>
Now, assume we want to build Hello with an old version of GCC, and
with different <command>configure</command> flags. A new
<varname>build_exotic</varname> job can be written that simply
<emphasis>overrides</emphasis> the relevant arguments passed to
<varname>nixBuild</varname>:

<programlisting>
build_exotic =
  { tarball ? jobs.tarball {}
  , system ? builtins.currentSystem
  }:

  let
    pkgs = import nixpkgs { inherit system; };
    build = jobs.build { inherit tarball system; };
  in
  pkgs.lib.overrideDerivation build (attrs: {
    buildInputs = [ pkgs.gcc33 ];
    preConfigure = "gcc --version";
    configureFlags =
      attrs.configureFlags ++ [ "--disable-nls" ];
  });
</programlisting>

The <varname>build_exotic</varname> job reuses
<varname>build</varname> and overrides some of its arguments: it
adds a dependency on GCC 3.3, a pre-configure phase that runs
<command>gcc --version</command>, and the
<literal>--disable-nls</literal> configure flag.
</para>
<para>
This customization mechanism is very powerful. For instance, it
can be used to change the way Hello and <emphasis>all</emphasis>
its dependencies (including the C library and the compiler used to
build it) are built. See the Nixpkgs manual for more.
</para>

</section>

</chapter>

<!--
Local Variables:
indent-tabs-mode: nil
ispell-local-dictionary: "american"
End:
-->
@@ -1,20 +0,0 @@

# Hydra User's Guide

- [Introduction](introduction.md)
- [Installation](installation.md)
- [Configuration](configuration.md)
- [Creating and Managing Projects](projects.md)
- [Hydra jobs](./jobs.md)
- [Plugins](./plugins/README.md)
  - [Declarative Projects](./plugins/declarative-projects.md)
  - [RunCommand](./plugins/RunCommand.md)
- [Using the external API](api.md)
- [Webhooks](webhooks.md)
  - [Webhook Authentication Migration Guide](webhook-migration-guide.md)
- [Monitoring Hydra](./monitoring/README.md)

## Developer's Guide
- [Hacking](hacking.md)
- [Hydra Notifications](notifications.md)

-----------
[About](about.md)
@@ -1,6 +0,0 @@

# Authors

* Eelco Dolstra, Delft University of Technology, Department of Software Technology
* Rob Vermaas, Delft University of Technology, Department of Software Technology
* Eelco Visser, Delft University of Technology, Department of Software Technology
* Ludovic Courtès
@@ -1,249 +0,0 @@

Using the external API
======================

To be able to create integrations with other services, Hydra exposes an
external API that you can manage projects with.

The API is accessed over HTTP(S), where all data is sent and received as
JSON.

Creating resources requires the caller to be authenticated, while
retrieving resources does not.

The API does not have a separate URL structure for its endpoints.
Instead, you request the pages of the web interface as `application/json`
to use the API.
List projects
-------------

To list all the `projects` of the Hydra install:

    GET /
    Accept: application/json

This will give you a list of `projects`, where each `project` contains
general information and a list of its `job sets`.

**Example**

    curl -i -H 'Accept: application/json' \
       https://hydra.nixos.org

**Note:** this response is truncated

    GET https://hydra.nixos.org/
    HTTP/1.1 200 OK
    Content-Type: application/json

    [
      {
        "displayname": "Acoda",
        "name": "acoda",
        "description": "Acoda is a tool set for automatic data migration along an evolving data model",
        "enabled": 0,
        "owner": "sander",
        "hidden": 1,
        "jobsets": [
          "trunk"
        ]
      },
      {
        "displayname": "cabal2nix",
        "name": "cabal2nix",
        "description": "Convert Cabal files into Nix build instructions",
        "enabled": 0,
        "owner": "simons@cryp.to",
        "hidden": 1,
        "jobsets": [
          "master"
        ]
      }
    ]
Get a single project
--------------------

To get a single `project` by identifier:

    GET /project/:project-identifier
    Accept: application/json

**Example**

    curl -i -H 'Accept: application/json' \
       https://hydra.nixos.org/project/hydra

    GET https://hydra.nixos.org/project/hydra
    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "description": "Hydra, the Nix-based continuous build system",
      "hidden": 0,
      "displayname": "Hydra",
      "jobsets": [
        "hydra-master",
        "hydra-ant-logger-trunk",
        "master",
        "build-ng"
      ],
      "name": "hydra",
      "enabled": 1,
      "owner": "eelco"
    }
Get a single job set
--------------------

To get a single `job set` by identifier:

    GET /jobset/:project-identifier/:jobset-identifier
    Content-Type: application/json

**Example**

    curl -i -H 'Accept: application/json' \
       https://hydra.nixos.org/jobset/hydra/build-ng

    GET https://hydra.nixos.org/jobset/hydra/build-ng
    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "errormsg": "evaluation failed due to signal 9 (Killed)",
      "fetcherrormsg": null,
      "nixexprpath": "release.nix",
      "nixexprinput": "hydraSrc",
      "emailoverride": "rob.vermaas@gmail.com, eelco.dolstra@logicblox.com",
      "jobsetinputs": {
        "officialRelease": {
          "jobsetinputalts": [
            "false"
          ]
        },
        "hydraSrc": {
          "jobsetinputalts": [
            "https://github.com/NixOS/hydra.git build-ng"
          ]
        },
        "nixpkgs": {
          "jobsetinputalts": [
            "https://github.com/NixOS/nixpkgs.git release-14.12"
          ]
        }
      },
      "enabled": 0
    }
List evaluations
----------------

To list the `evaluations` of a `job set` by identifier:

    GET /jobset/:project-identifier/:jobset-identifier/evals
    Content-Type: application/json

**Example**

    curl -i -H 'Accept: application/json' \
       https://hydra.nixos.org/jobset/hydra/build-ng/evals

**Note:** this response is truncated

    GET https://hydra.nixos.org/jobset/hydra/build-ng/evals
    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "evals": [
        {
          "jobsetevalinputs": {
            "nixpkgs": {
              "dependency": null,
              "type": "git",
              "value": null,
              "uri": "https://github.com/NixOS/nixpkgs.git",
              "revision": "f60e48ce81b6f428d072d3c148f6f2e59f1dfd7a"
            },
            "hydraSrc": {
              "dependency": null,
              "type": "git",
              "value": null,
              "uri": "https://github.com/NixOS/hydra.git",
              "revision": "48d6f0de2ab94f728d287b9c9670c4d237e7c0f6"
            },
            "officialRelease": {
              "dependency": null,
              "value": "false",
              "type": "boolean",
              "uri": null,
              "revision": null
            }
          },
          "hasnewbuilds": 1,
          "builds": [
            24670686,
            24670684,
            24670685,
            24670687
          ],
          "id": 1213758
        }
      ],
      "first": "?page=1",
      "last": "?page=1"
    }
Get a single build
------------------

To get a single `build` by its id:

    GET /build/:build-id
    Content-Type: application/json

**Example**

    curl -i -H 'Accept: application/json' \
       https://hydra.nixos.org/build/24670686

    GET /build/24670686
    HTTP/1.1 200 OK
    Content-Type: application/json

    {
      "job": "tests.api.x86_64-linux",
      "jobsetevals": [
        1213758
      ],
      "buildstatus": 0,
      "buildmetrics": null,
      "project": "hydra",
      "system": "x86_64-linux",
      "priority": 100,
      "releasename": null,
      "starttime": 1439402853,
      "nixname": "vm-test-run-unnamed",
      "timestamp": 1439388618,
      "id": 24670686,
      "stoptime": 1439403403,
      "jobset": "build-ng",
      "buildoutputs": {
        "out": {
          "path": "/nix/store/lzrxkjc35mhp8w7r8h82g0ljyizfchma-vm-test-run-unnamed"
        }
      },
      "buildproducts": {
        "1": {
          "path": "/nix/store/lzrxkjc35mhp8w7r8h82g0ljyizfchma-vm-test-run-unnamed",
          "defaultpath": "log.html",
          "type": "report",
          "sha256hash": null,
          "filesize": null,
          "name": "",
          "subtype": "testlog"
        }
      },
      "finished": 1
    }
@@ -1,310 +0,0 @@

Configuration
=============

This chapter is a collection of configuration snippets for different
scenarios.

The configuration is parsed by `Config::General`, which has [pretty
thorough documentation of its file format](https://metacpan.org/pod/Config::General#CONFIG-FILE-FORMAT).
Hydra calls the parser with the following options:
- `-UseApacheInclude => 1`
- `-IncludeAgain => 1`
- `-IncludeRelative => 1`

Including files
---------------

`hydra.conf` supports Apache-style includes. This is **IMPORTANT**
because that is how you keep your **secrets** out of the **Nix store**.
Hopefully this got your attention 😌
This:
```
<github_authorization>
NixOS = Bearer gha-secret😱secret😱secret😱
</github_authorization>
```
should **NOT** be in `hydra.conf`.

`hydra.conf` is rendered in the Nix store and is therefore world-readable.

Instead, the above should be written to a file outside the Nix store by
other means (manually, using NixOps' secrets feature, etc.) and included
like so:
```
Include /run/keys/hydra/github_authorizations.conf
```
Serving behind reverse proxy
----------------------------

To serve Hydra's web server behind a reverse proxy like *nginx* or
*httpd*, some additional configuration is required.

Edit your `hydra.conf` file in a similar way to this example:

```conf
using_frontend_proxy 1
base_uri example.com
```

`base_uri` should be your Hydra server's proxied URL. If you are using
the Hydra NixOS module, setting the `hydraURL` option should be enough.
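
On NixOS this could look as follows (a sketch; it assumes the Hydra
NixOS module is enabled):

```nix
{
  # Proxied URL of your Hydra server; see the note on `hydraURL` above.
  services.hydra.hydraURL = "https://example.com/hydra";
}
```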
You also need to configure your reverse proxy to pass `X-Request-Base`
to Hydra, with the same value as `base_uri`.
This also covers the case of serving Hydra with a prefix path,
as in [http://example.com/hydra]().

For example, if you are using nginx, use configuration similar to the
following:

    server {
        listen 443 ssl;
        server_name example.com;
        .. other configuration ..
        location /hydra/ {

            proxy_pass http://127.0.0.1:3000/;

            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Request-Base /hydra;
        }
    }

Note the trailing slash on the `proxy_pass` directive, which causes nginx to
strip off the `/hydra/` part of the URL before passing it to Hydra.
Populating a Cache
------------------

A common use for Hydra is to pre-build and cache derivations which
take a long time to build. While it is possible to directly access the
Hydra server's store over SSH, a more scalable option is to upload
built derivations to a remote store like an [S3-compatible object
store](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html#s3-binary-cache-store). Setting
the `store_uri` parameter will cause Hydra to sign and upload
derivations as they are built:

```
store_uri = s3://cache-bucket-name?compression=zstd&parallel-compression=true&write-nar-listing=1&ls-compression=br&log-compression=br&secret-key=/path/to/cache/private/key
```

This example uses [Zstandard](https://github.com/facebook/zstd)
compression on derivations to reduce CPU usage on the server, but
[Brotli](https://brotli.org/) compression for derivation listings and
build logs because it has better browser support.

See [`nix help
stores`](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html)
for a description of the store URI format.
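
The signing key referenced by the `secret-key=` parameter can be
generated with `nix-store` (a sketch; the key name and the paths are
placeholders):

```console
$ nix-store --generate-binary-cache-key cache.example.org-1 \
    /path/to/cache/private/key /path/to/cache/public/key
```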
Statsd Configuration
--------------------

By default, Hydra will send stats to statsd at `localhost:8125`. Point Hydra to a different server via:

```
<statsd>
  host = alternative.host
  port = 18125
</statsd>
```
hydra-notify's Prometheus service
---------------------------------

hydra-notify supports running a Prometheus webserver for metrics. The
exporter does not run unless a listen address and port are specified
in the hydra configuration file, as below:

```conf
<hydra_notify>
  <prometheus>
    listen_address = 127.0.0.1
    port = 9199
  </prometheus>
</hydra_notify>
```
hydra-queue-runner's Prometheus service
---------------------------------------

hydra-queue-runner supports running a Prometheus webserver for metrics. The
exporter's address defaults to exposing on `127.0.0.1:9198`, but is also
configurable through the hydra configuration file and a command line argument,
as below. A port of `:0` will make the exporter choose a random, available port.

```conf
queue_runner_metrics_address = 127.0.0.1:9198
# or
queue_runner_metrics_address = [::]:9198
```

```shell
$ hydra-queue-runner --prometheus-address 127.0.0.1:9198
# or
$ hydra-queue-runner --prometheus-address [::]:9198
```
Using LDAP as authentication backend (optional)
-----------------------------------------------

Instead of using Hydra's built-in user management you can optionally
use LDAP to manage roles and users.

This is configured by defining the `<ldap>` block in the configuration file.
In this block it's possible to configure the authentication plugin in the
`<config>` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`.
The documentation for the available settings can be found
[here](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).

Note that the bind password (if needed) should be supplied as an included file to
prevent it from leaking to the Nix store.
Roles can be assigned to users based on their LDAP group membership. For this
to work *use\_roles = 1* needs to be defined for the authentication plugin.
LDAP groups can then be mapped to Hydra roles using the `<role_mapping>` block.

Example configuration:
```
<ldap>
  <config>
    <credential>
      class = Password
      password_field = password
      password_type = self_check
    </credential>
    <store>
      class = LDAP
      ldap_server = localhost
      <ldap_server_options>
        timeout = 30
      </ldap_server_options>
      binddn = "cn=root,dc=example"
      include ldap-password.conf
      start_tls = 0
      <start_tls_options>
        verify = none
      </start_tls_options>
      user_basedn = "ou=users,dc=example"
      user_filter = "(&(objectClass=inetOrgPerson)(cn=%s))"
      user_scope = one
      user_field = cn
      <user_search_options>
        deref = always
      </user_search_options>
      # Important for role mappings to work:
      use_roles = 1
      role_basedn = "ou=groups,dc=example"
      role_filter = "(&(objectClass=groupOfNames)(member=%s))"
      role_scope = one
      role_field = cn
      role_value = dn
      <role_search_options>
        deref = always
      </role_search_options>
    </store>
  </config>
  <role_mapping>
    # Make all users in the hydra_admin group Hydra admins
    hydra_admin = admin
    # Allow all users in the dev group to eval jobsets, restart jobs and cancel builds
    dev = eval-jobset
    dev = restart-jobs
    dev = cancel-build
  </role_mapping>
</ldap>
```

Then, place the password for your LDAP server in `/var/lib/hydra/ldap-password.conf`:

```
bindpw = the-ldap-password
```
### Debugging LDAP

Set the `debug` parameter under `ldap.config.ldap_server_options.debug`:

```
<ldap>
  <config>
    <store>
      <ldap_server_options>
        debug = 2
      </ldap_server_options>
    </store>
  </config>
</ldap>
```
### Legacy LDAP Configuration

Hydra used to load the LDAP configuration from a YAML file named by the
`HYDRA_LDAP_CONFIG` environment variable. This behavior is deprecated
and will be removed.

When Hydra uses the deprecated YAML file, Hydra applies the following
default role mapping:

```
<ldap>
  <role_mapping>
    hydra_admin = admin
    hydra_bump-to-front = bump-to-front
    hydra_cancel-build = cancel-build
    hydra_create-projects = create-projects
    hydra_restart-jobs = restart-jobs
  </role_mapping>
</ldap>
```

Note that configuring the LDAP parameters both in `hydra.conf` and via
the environment variable is a fatal error.
Webhook Authentication
---------------------

Hydra supports authenticating webhook requests from GitHub and Gitea to prevent unauthorized job evaluations.
For security, webhook secrets should be stored in separate files outside the Nix store, using Config::General's include mechanism.

In your main `hydra.conf`:
```apache
<webhooks>
  Include /var/lib/hydra/secrets/webhook-secrets.conf
</webhooks>
```

Then create `/var/lib/hydra/secrets/webhook-secrets.conf` with your actual secrets:
```apache
<github>
  secret = your-github-webhook-secret
</github>
<gitea>
  secret = your-gitea-webhook-secret
</gitea>
```

For multiple secrets (useful for rotation or multiple environments), use an array:
```apache
<github>
  secret = your-github-webhook-secret-prod
  secret = your-github-webhook-secret-staging
</github>
```

**Important**: The secrets file should have restricted permissions (e.g., 0600) to prevent unauthorized access.
See the [Webhooks documentation](webhooks.md) for detailed setup instructions.
Embedding Extra HTML
--------------------

Embed an analytics widget or other HTML in the `<head>` of each HTML document via:

```conf
tracker = <script src="...">
```
@@ -1,108 +0,0 @@

# Hacking

This section provides some notes on how to hack on Hydra. To get the
latest version of Hydra from GitHub:

```console
$ git clone git://github.com/NixOS/hydra.git
$ cd hydra
```

To enter a shell in which all environment variables (such as `PERL5LIB`)
and dependencies can be found:

```console
$ nix develop
```

To build Hydra, you should then do:

```console
$ mesonConfigurePhase
$ ninja
```

You can start a local database, the web server, and other components
with foreman:

```console
$ ninja -C build
$ foreman start
```

The Hydra interface will be available on port 63333, with an admin user
named "alice" with password "foobar".
You can run just the Hydra web server in your source tree as follows:

```console
$ ./src/script/hydra-server
```

You can run Hydra's test suite with the following:

```console
$ meson test
# to run as many tests as you have cores:
$ YATH_JOB_COUNT=$NIX_BUILD_CORES meson test
```

To run individual tests:

```console
# Run a specific test file
$ PERL5LIB=t/lib:$PERL5LIB perl t/test.pl t/Hydra/Controller/API/checks.t

# Run all tests in a directory
$ PERL5LIB=t/lib:$PERL5LIB perl t/test.pl t/Hydra/Controller/API/
```

**Warning**: Currently, the tests can fail
if run with high parallelism [due to an issue in
`Test::PostgreSQL`](https://github.com/TJC/Test-postgresql/issues/40)
causing database ports to collide.

## Working on the Manual

By default, `foreman start` runs mdbook in "watch" mode. mdbook listens
at [http://localhost:63332/](http://localhost:63332/), and
will reload the page every time you save.
## Building

To build Hydra and its dependencies:

```console
$ nix build .#packages.x86_64-linux.default
```

## Development Tasks

### Connecting to the database

Assuming you're running the default configuration with `foreman start`,
open an interactive session with Postgres via:

```console
$ psql --host localhost --port 64444 hydra
```

### Running the builder locally

For `hydra-queue-runner` to successfully build locally, your
development user will need to be "trusted" by your Nix store.

Add yourself to the `trusted_users` option of `/etc/nix/nix.conf`.

On NixOS:

```nix
{
  nix.settings.trusted-users = [ "YOURUSER" ];
}
```

Off NixOS, change `/etc/nix/nix.conf`:

```conf
trusted-users = root YOURUSERNAME
```
@@ -1,165 +0,0 @@

Installation
============

This chapter explains how to install Hydra on your own build farm
server.

Prerequisites
-------------

To install and use Hydra you need to have installed the following
dependencies:

- Nix

- PostgreSQL

- many Perl packages, notably Catalyst, EmailSender, and NixPerl (see
  the [Hydra expression in
  Nixpkgs](https://github.com/NixOS/hydra/blob/master/release.nix) for
  the complete list)

At the moment, Hydra runs only on GNU/Linux (*i686-linux* and
*x86\_64-linux*).

For small projects, Hydra can be run on any reasonably modern machine.
For individual projects you can even run Hydra on a laptop. However, the
charm of a buildfarm server is usually that it operates without
disturbing the developer's working environment and can serve releases
over the internet. You will typically also want your source code
administered in a version management system, such as Subversion.
Therefore, you will probably want to install a server that is connected
to the internet. To scale up to large and/or many projects, you will
need at least a considerable amount of disk space to store builds. Since
Hydra can schedule multiple simultaneous build jobs, it can be useful to
have a multi-core machine, and/or attach multiple build machines in a
network to the central Hydra server.

Of course we think it is a good idea to use the
[NixOS](http://nixos.org/nixos) GNU/Linux distribution for your
buildfarm server. But this is not a requirement. The Nix software
deployment system can be installed on any GNU/Linux distribution in
parallel to the regular package management system. Thus, you can use
Hydra on a Debian, Fedora, SuSE, or Ubuntu system.
Getting Nix
-----------

If your server runs NixOS you are all set to continue with installation
of Hydra. Otherwise you first need to install Nix. The latest stable
version can be found on [the Nix web
site](https://nixos.org/download/), along with a manual, which
includes installation instructions.

Installation
------------

The latest development snapshot of Hydra can be installed by visiting
the URL
[`http://hydra.nixos.org/view/hydra/unstable`](http://hydra.nixos.org/view/hydra/unstable)
and using the one-click install available at one of the build pages. You
can also install Hydra through the channel by performing the following
commands:

    nix-channel --add http://hydra.nixos.org/jobset/hydra/master/channel/latest
    nix-channel --update
    nix-env -i hydra

Command completion should reveal a number of command-line tools from
Hydra, such as `hydra-queue-runner`.
Creating the database
---------------------

Hydra stores its results in a PostgreSQL database.

To set up a PostgreSQL database with *hydra* as database name and user
name, issue the following commands on the PostgreSQL server:

```console
createuser -S -D -R -P hydra
createdb -O hydra hydra
```

Hydra uses an environment variable to know which database should be
used, and a variable that points to a location that holds some state. To
set these variables for a PostgreSQL database, add the following to the
file `~/.profile` of the user running the Hydra services.

```console
export HYDRA_DBI="dbi:Pg:dbname=hydra;host=dbserver.example.org;user=hydra;"
export HYDRA_DATA=/var/lib/hydra
```

You can provide the username and password in the file `~/.pgpass`, e.g.

```
dbserver.example.org:*:hydra:hydra:password
```

Make sure that the *HYDRA\_DATA* directory exists and is writable for
the user which will run the Hydra services.

Having set these environment variables, you can now initialise the
database by doing:

```console
hydra-init
```

To create projects, you need to create a user with *admin* privileges.
This can be done using the command `hydra-create-user`:

```console
$ hydra-create-user alice --full-name 'Alice Q. User' \
    --email-address 'alice@example.org' --password-prompt --role admin
```

Additional users can be created through the web interface.
Upgrading
---------

If you're upgrading Hydra from a previous version, you should do the
following to perform any necessary database schema migrations:

```console
hydra-init
```
Getting Started
---------------

To start the Hydra web server, execute:

```console
hydra-server
```

When the server is started, you can browse to [http://localhost:3000/]()
to start configuring your Hydra instance.

The `hydra-server` command launches the web server. There are two other
processes that come into play:

- The *evaluator* is responsible for periodically evaluating job sets,
  checking out their dependencies off their version control systems
  (VCS), and queueing new builds if the result of the evaluation
  changed. It is launched by the `hydra-evaluator` command.
- The *queue runner* launches builds (using Nix) as they are queued by
  the evaluator, scheduling them onto the configured Nix hosts. It is
  launched using the `hydra-queue-runner` command.

All three processes must be running for Hydra to be fully functional,
though it's possible to temporarily stop any one of them for
maintenance purposes, for instance.
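
For a quick smoke test outside of a service manager, the three daemons
can simply be started by hand (a sketch; in production you would
typically run them as system services):

```console
$ hydra-server &
$ hydra-evaluator &
$ hydra-queue-runner &
```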
@@ -1,173 +0,0 @@

Introduction
============

About Hydra
-----------

Hydra is a tool for continuous integration testing and software release
that uses a purely functional language to describe build jobs and their
dependencies. Continuous integration is a simple technique to improve
the quality of the software development process. An automated system
continuously or periodically checks out the source code of a project,
builds it, runs tests, and produces reports for the developers. Thus,
various errors that might accidentally be committed into the code base
are automatically caught. Such a system allows more in-depth testing
than what developers could feasibly do manually:

- *Portability testing*: The software may need to be built and tested
  on many different platforms. It is infeasible for each developer to
  do this before every commit.
- Likewise, many projects have very large test sets (e.g., regression
  tests in a compiler, or stress tests in a DBMS) that can take hours
  or days to run to completion.
- Many kinds of static and dynamic analyses can be performed as part
  of the tests, such as code coverage runs and static analyses.
- It may also be necessary to build many different *variants* of the
  software. For instance, it may be necessary to verify that the
  component builds with various versions of a compiler.
- Developers typically use incremental building to test their changes
  (since a full build may take too long), but this is unreliable with
  many build management tools (such as Make), i.e., the result of the
  incremental build might differ from a full build.
- It ensures that the software can be built from the sources under
  revision control. Users of version management systems such as CVS
  and Subversion often forget to place source files under revision
  control.
- The machines on which the continuous integration system runs ideally
  provide a clean, well-defined build environment. If this environment
  is administered through proper SCM techniques, then builds produced
  by the system can be reproduced. In contrast, developer work
  environments are typically not under any kind of SCM control.
- In large projects, developers often work on a particular component
  of the project, and do not build and test the composition of those
  components (again since this is likely to take too long). To prevent
  the phenomenon of "big bang integration", where components are only
  tested together near the end of the development process, it is
  important to test components together as soon as possible (hence
  *continuous integration*).
- It allows software to be *released* by automatically creating
  packages that users can download and install. Doing this manually
  represents an often prohibitive amount of work, as one may want to
  produce releases for many different platforms: e.g., installers for
  Windows and Mac OS X, RPM or Debian packages for certain Linux
  distributions, and so on.

In its simplest form, a continuous integration tool sits in a loop
building and releasing software components from a version management
system. For each component, it performs the following tasks:

- It obtains the latest version of the component's source code from
  the version management system.
- It runs the component's build process (which presumably includes
  the execution of the component's test set).
- It presents the results of the build (such as error logs and
  releases) to the developers, e.g., by producing a web page.

Examples of continuous integration tools include Jenkins, CruiseControl,
Tinderbox, Sisyphus, Anthill and BuildBot. These tools have various
limitations.

- They do not manage the *build environment*. The build environment
  consists of the dependencies necessary to perform a build action,
  e.g., compilers, libraries, etc. Setting up the environment is
  typically done manually, and without proper SCM control (so it may
  be hard to reproduce a build at a later time). Manual management of
  the environment scales poorly in the number of configurations that
  must be supported. For instance, suppose that we want to build a
  component that requires a certain compiler X. We then have to go to
  each machine and install X. If we later need a newer version of X,
  the process must be repeated all over again. An even worse problem
  occurs if there are conflicting, mutually exclusive versions of the
  dependencies. Thus, simply installing the latest version is not an
  option. Of course, we can install these components in different
  directories and manually pass the appropriate paths to the build
  processes of the various components. But this is a rather tiresome
  and error-prone process.
- They do not easily support *variability in software systems*. A
  system may have a great deal of build-time variability: optional
  functionality, whether to build a debug or production version,
  different versions of dependencies, and so on. (For instance, the
  Linux kernel now has over 2,600 build-time configuration switches.)
  It is therefore important that a continuous integration tool can
  easily select and test different instances from the configuration
  space of the system to reveal problems, such as erroneous
  interactions between features. In a continuous integration setting,
  it is also useful to test different combinations of versions of
  subsystems, e.g., the head revision of a component against stable
  releases of its dependencies, and vice versa, as this can reveal
  various integration problems.

*Hydra* is a continuous integration tool that solves these problems. It
is built on top of the [Nix package manager](http://nixos.org/nix/),
which has a purely functional language for describing package build
actions and their dependencies. This allows the build environment for
projects to be produced automatically and deterministically, and
variability in components to be expressed naturally using functions;
as such, it is an ideal fit for a continuous build system.

About Us
--------

Hydra is the successor of the Nix Buildfarm, which was developed in
tandem with the Nix software deployment system. Nix was originally
developed at the Department of Information and Computing Sciences,
Utrecht University, by the TraCE project (2003-2008). The project was
funded by the Software Engineering Research Program Jacquard to improve
the support for variability in software systems. Funding for the
development of Nix and Hydra is now provided by the NIRICT LaQuSo Build
Farm project.

About this Manual
-----------------

This manual tells you how to install the Hydra buildfarm software on
your own server and how to operate that server using its web interface.

License
-------

Hydra is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your
option) any later version.

Hydra is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the [GNU General Public
License](http://www.gnu.org/licenses/) for more details.

Hydra at `nixos.org`
--------------------

The `nixos.org` installation of Hydra runs at
[`http://hydra.nixos.org/`](http://hydra.nixos.org/). That installation
is used to build software components from the [Nix](http://nixos.org),
[NixOS](http://nixos.org/nixos), [GNU](http://www.gnu.org/),
[Stratego/XT](http://strategoxt.org), and related projects.

If you are one of the developers on those projects, it is likely that
you will be using the NixOS Hydra server in some way. If you need to
administer automatic builds for your project, you should pull the right
strings to get an account on the server. This manual will tell you how
to set up new projects and build jobs within those projects, and how to
write a `release.nix` file to describe the build process of your
project to Hydra. You can skip the next chapter.

If your project does not yet have automatic builds within the NixOS
Hydra server, it may actually be eligible. We are in the process of
setting up a large buildfarm that should be able to support open source
and academic software projects. Get in touch.

Hydra on your own buildfarm
---------------------------

If you need to run your own Hydra installation, the
[installation chapter](installation.md) explains how to download and
install the system on your own server.
@@ -1,21 +0,0 @@
# Hydra Jobs

## Derivation Attributes

Hydra stores the following job attributes in its database:

* `nixName` - the derivation's `name` attribute
* `system` - the derivation's `system` attribute
* `drvPath` - the derivation's path in the Nix store
* `outputs` - a JSON dictionary of output names and their store paths

### Meta fields

* `description` - `meta.description`, a string
* `license` - a comma-separated list of license names from `meta.license`, expected to be a list of attribute sets with an attribute named `shortName`, e.g. `[ { shortName = "licensename"; } ]`.
* `homepage` - `meta.homepage`, a string
* `maintainers` - a comma-separated list of maintainer email addresses from `meta.maintainers`, expected to be a list of attribute sets with an attribute named `email`, e.g. `[ { email = "alice@example.com"; } ]`.
* `schedulingPriority` - `meta.schedulingPriority`, an integer. Default: 100. Slightly prioritizes this job over other jobs within this jobset.
* `timeout` - `meta.timeout`, an integer. Default: 36000. Number of seconds within which this job must complete.
* `maxSilent` - `meta.maxSilent`, an integer. Default: 7200. Number of seconds of no output on stderr/stdout before the job is considered failed.
* `isChannel` - `meta.isHydraChannel`, a boolean. Default: false. Deprecated.
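
For illustration, a package could populate these fields through its derivation's `meta` attribute roughly like this (a hedged sketch; the package name and values are made up):

```nix
stdenv.mkDerivation {
  name = "example-1.0";
  src = ./.;
  meta = {
    description = "An example job";
    license = [ { shortName = "mit"; } ];
    homepage = "https://example.org";
    maintainers = [ { email = "alice@example.com"; } ];
    schedulingPriority = 100; # default
    timeout = 36000;          # default, in seconds
    maxSilent = 7200;         # default, in seconds
  };
}
```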
@@ -1,33 +0,0 @@
# Monitoring Hydra

## Webserver

The webserver exposes Prometheus metrics for the webserver itself at `/metrics`.
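
For example, the endpoint can be queried by hand (a sketch, assuming the default `hydra-server` port of 3000):

```console
$ curl http://localhost:3000/metrics
```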

## Queue Runner

The queue runner's status is exposed at `/queue-runner-status`:

```console
$ curl --header "Accept: application/json" http://localhost:63333/queue-runner-status
... JSON payload ...
```

## Notification Daemon

The `hydra-notify` process can expose Prometheus metrics for plugin execution. See
[hydra-notify's Prometheus service](../configuration.md#hydra-notifys-prometheus-service)
for details on enabling and configuring the exporter.

The notification exporter exposes metrics on a per-plugin, per-event-type basis: execution
durations, frequency, successes, and failures.

### Diagnostic Dump

The notification daemon can also dump its metrics to stderr whether or not the exporter
is configured. This is particularly useful for cases where metrics data is needed but the
exporter was not enabled.

To trigger this diagnostic dump, send a Postgres notification on the
`hydra_notify_dump_metrics` channel with no payload. See
[Re-sending a notification](../notifications.md#re-sending-a-notification).
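
For instance, with `psql` (a sketch, assuming the database is named `hydra`):

```console
$ psql hydra -c "NOTIFY hydra_notify_dump_metrics"
```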
@@ -1,87 +0,0 @@
# `hydra-notify` and Hydra's Notifications

Hydra uses a notification-based subsystem to implement some features and support plugin development. Notifications are sent to `hydra-notify`, which is responsible for dispatching each notification to each plugin.

Notifications are passed from `hydra-queue-runner` to `hydra-notify` through Postgres's `NOTIFY` and `LISTEN` feature.

## Notification Types

Note that the notification format is subject to change and should not be considered an API. Integrate with `hydra-notify` instead of listening directly.

### `cached_build_finished`

* **Payload:** Exactly two values, tab separated: the ID of the evaluation which contains the finished build, followed by the ID of the finished build.
* **When:** Issued directly after an evaluation completes, when that evaluation includes this finished build.
* **Delivery Semantics:** At most once per evaluation.

### `cached_build_queued`

* **Payload:** Exactly two values, tab separated: the ID of the evaluation which contains the queued build, followed by the ID of the queued build.
* **When:** Issued directly after an evaluation completes, when that evaluation includes this queued build.
* **Delivery Semantics:** At most once per evaluation.

### `build_queued`

* **Payload:** Exactly one value, the ID of the build.
* **When:** Issued after the transaction inserting the build into the database is committed. One notification is sent per new build.
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.

### `build_started`

* **Payload:** Exactly one value, the ID of the build.
* **When:** Issued directly before building happens, and only if the derivation's outputs cannot be substituted.
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.

### `step_finished`

* **Payload:** Three values, tab separated: the ID of the build which the step is part of, the step number, and the path on disk to the log file.
* **When:** Issued directly after a step completes, regardless of success. Is not issued if the step's derivation's outputs can be substituted.
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.

### `build_finished`

* **Payload:** At least one value, tab separated: the ID of the build which finished, followed by the IDs of all of the builds which also depended upon this build.
* **When:** Issued directly after a build completes, regardless of success and substitutability.
* **Delivery Semantics:** At least once.

`hydra-notify` will call `buildFinished` for each plugin in two ways:

* The `builds` table's `notificationspendingsince` column stores when the build finished. On startup, `hydra-notify` will query all builds with a non-null `notificationspendingsince` value and treat each row as a received `build_finished` event.

* Additionally, `hydra-notify` subscribes to `build_finished` events and processes them in real time.

After processing, the row's `notificationspendingsince` column is set to null.
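
To inspect which builds still have pending notifications (a sketch, assuming the database is named `hydra`):

```console
$ psql hydra -c "SELECT id FROM builds WHERE notificationspendingsince IS NOT NULL"
```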

It is possible for subsequent deliveries of the same `build_finished` data to imply different outcomes: for example, if the build fails, is restarted, and then succeeds. In this scenario the `build_finished` event will be delivered at least twice, once for the failure and then once for the success.

### `eval_started`

* **Payload:** Exactly two values, tab separated: an opaque trace ID representing this evaluation, and the ID of the jobset.
* **When:** At the beginning of the evaluation phase for the jobset, before any work is done.
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.

### `eval_added`

* **Payload:** Exactly three values, tab separated: an opaque trace ID representing this evaluation, the ID of the jobset, and the ID of the JobsetEval record.
* **When:** After the evaluator fetches inputs and completes the evaluation successfully.
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.

### `eval_cached`

* **Payload:** Exactly three values: an opaque trace ID representing this evaluation, the ID of the jobset, and the ID of the previous identical evaluation.
* **When:** After the evaluator fetches inputs, if none of the inputs changed.
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.

### `eval_failed`

* **Payload:** Exactly two values: an opaque trace ID representing this evaluation, and the ID of the jobset.
* **When:** When fetching any input fails, or when any other evaluation error occurs.
* **Delivery Semantics:** Ephemeral. `hydra-notify` must be running to react to this event. No record of this event is stored.

## Development Notes

### Re-sending a notification

Notifications can be experimentally re-sent on the command line with `psql`, with `NOTIFY $notificationname, '$payload'`.
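
For example, re-sending a `build_queued` notification for a hypothetical build 12345 could look like this (a sketch, assuming the database is named `hydra`):

```console
$ psql hydra -c "NOTIFY build_queued, '12345'"
```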
@@ -1,278 +0,0 @@
# Plugins

This chapter describes all plugins present in Hydra.

### Inputs

Hydra supports the following inputs:

- Bazaar input
- Darcs input
- Git input
- Mercurial input
- Path input

## Bitbucket pull requests

Creates jobs based on open Bitbucket pull requests.

### Configuration options

- `bitbucket_authorization.<owner>`

## Bitbucket status

Sets Bitbucket CI status.

### Configuration options

- `enable_bitbucket_status`
- `bitbucket.username`
- `bitbucket.password`

## CircleCI Notification

Sets CircleCI status.

### Configuration options

- `circleci.[].jobs`
- `circleci.[].vcstype`
- `circleci.[].token`

## Compress build logs

Compresses build logs after a build with bzip2 or zstd.

### Configuration options

- `compress_build_logs`

  Enable log compression.

- `compress_build_logs_compression`

  Which compression format to use. Valid values are bzip2 (default) and zstd.

- `compress_build_logs_silent`

  Whether to compress logs silently.

### Example

```conf
compress_build_logs = 1
```

## Coverity Scan

Uploads source code to [Coverity Scan](https://scan.coverity.com).

### Configuration options

- `coverityscan.[].jobs`
- `coverityscan.[].project`
- `coverityscan.[].email`
- `coverityscan.[].token`
- `coverityscan.[].scanurl`

## Email notification

Sends an email notification if the build status changes.

### Configuration options

- `email_notification`

## Gitea status

Sets Gitea CI status.

### Configuration options

- `gitea_authorization.<repo-owner>`

## GitHub pulls

Creates jobs based on open GitHub pull requests.

### Configuration options

- `github_authorization.<repo-owner>`

## Github refs

Retrieves the list of references (branches or tags) from GitHub
following a certain naming scheme.

### Configuration options

- `github_endpoint` (defaults to https://api.github.com)
- `github_authorization.<repo-owner>`

## Github status

Sets GitHub CI status.

### Configuration options

- `githubstatus.[].jobs`

  Regular expression for jobs to match in the format `project:jobset:job`.
  This field is required and has no default value.

- `githubstatus.[].excludeBuildFromContext`

  Don't include the build's ID in the status.

- `githubstatus.[].context`

  Context shown in the status.

- `githubstatus.[].useShortContext`

  Renames `continuous-integration/hydra` to `ci/hydra` and removes the PR suffix
  from the name. Useful to see the full path in GitHub for long job names.

- `githubstatus.[].description`

  Description shown in the status. Defaults to `Hydra build #<build-id> of
  <jobname>`.

- `githubstatus.[].inputs`

  The input which corresponds to the GitHub repo/rev whose
  status we want to report. Can be repeated.

- `githubstatus.[].authorization`

  Verbatim contents of the Authorization header. See the
  [GitHub documentation](https://developer.github.com/v3/#authentication) for
  details. This field is only used if `github_authorization.<repo-owner>` is not set.

### Example

```xml
<githubstatus>
  jobs = test:pr:build
  ## This example will match all jobs
  #jobs = .*
  inputs = src
  authorization = Bearer gha-secret😱secret😱secret😱
  excludeBuildFromContext = 1
</githubstatus>
```

## GitLab pulls

Creates jobs based on open GitLab pull requests.

### Configuration options

- `gitlab_authorization.<projectId>`

## Gitlab status

Sets GitLab CI status.

### Configuration options

- `gitlab_authorization.<projectId>`

## InfluxDB notification

Writes an InfluxDB event when a build finishes.

### Configuration options

- `influxdb.url`
- `influxdb.db`
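
A hypothetical configuration could look like this (a sketch; the URL and database name are placeholders):

```conf
influxdb.url = http://127.0.0.1:8086
influxdb.db = hydra
```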

## RunCommand

Runs a shell command when the build is finished.

See [The RunCommand Plugin](./RunCommand.md) for more information.

### Configuration options

- `runcommand.[].job`

  Matcher for jobs in the format `project:jobset:job`.
  Defaults to `*:*:*`.

- `runcommand.[].command`

  Command to run. Can use the `$HYDRA_JSON` environment variable to access
  information about the build.

### Example

```xml
<runcommand>
  job = myProject:*:*
  command = cat $HYDRA_JSON > /tmp/hydra-output
</runcommand>
```

## S3 backup

Uploads nars and narinfos to S3 storage.

### Configuration options

- `s3backup.[].jobs`
- `s3backup.[].compression_type`
- `s3backup.[].name`
- `s3backup.[].prefix`

## Slack notification

Sends Slack notifications about build results.

### Configuration options

- `slack.[].jobs`
- `slack.[].force`
- `slack.[].url`

## SoTest

Schedules hardware tests on a SoTest controller.

This plugin submits tests to a SoTest controller for all builds that contain
two products matching the subtypes "sotest-binaries" and "sotest-config".

Build products are declared by the file `nix-support/hydra-build-products`
relative to the root of a build, in the following format:

```
file sotest-binaries /nix/store/…/binaries.zip
file sotest-config /nix/store/…/config.yaml
```

### Configuration options

- `sotest.[].uri`

  URL of the controller, defaults to `https://opensource.sotest.io`.

- `sotest.[].authfile`

  File containing `username:password`.

- `sotest.[].priority`

  Optional priority setting.

### Example

```xml
<sotest>
  uri = https://sotest.example
  authfile = /var/lib/hydra/sotest.auth
  priority = 1
</sotest>
```
@@ -1,83 +0,0 @@
## The RunCommand Plugin

Hydra supports executing a program after certain builds finish.
This behavior is disabled by default.

Hydra executes these commands under the `hydra-notify` service.

### Static Commands

Configure specific commands to execute after the specified matching job finishes.

#### Configuration

- `runcommand.[].job`

  A matcher for jobs to match in the format `project:jobset:job`. Defaults to `*:*:*`.

  **Note:** This matcher format is not a regular expression.
  The `*` is a wildcard for that entire section; partial matches are not supported.

- `runcommand.[].command`

  Command to run. Can use the `$HYDRA_JSON` environment variable to access information about the build.

### Example

```xml
<runcommand>
  job = myProject:*:*
  command = cat $HYDRA_JSON > /tmp/hydra-output
</runcommand>
```

### Dynamic Commands

Hydra can optionally run RunCommand hooks defined dynamically by the jobset. In
order to enable dynamic commands, you must enable this feature in your
`hydra.conf`, *as well as* in the parent project and jobset configuration.

#### Behavior

Hydra will execute any program defined under the `runCommandHook` attribute set. These jobs must have a single output named `out`, and that output must be an executable file located directly at `$out`.

#### Security Properties

Safely deploying dynamic commands requires careful design of your Hydra jobs. Allowing arbitrary users to define attributes in your top-level attribute set will allow those users to execute code on your Hydra.

If a jobset has dynamic commands enabled, you must ensure only trusted users can define top-level attributes.

#### Configuration

- `dynamicruncommand.enable`

  Set to 1 to enable dynamic RunCommand program execution.

#### Example

In your Hydra configuration, specify:

```xml
<dynamicruncommand>
  enable = 1
</dynamicruncommand>
```

Then create a job named `runCommandHook.example` in your jobset:

```nix
{ pkgs, ... }: {
  runCommandHook = {
    recurseForDerivations = true;

    example = pkgs.writeScript "run-me" ''
      #!${pkgs.runtimeShell}

      ${pkgs.jq}/bin/jq . "$HYDRA_JSON"
    '';
  };
}
```

After the `runCommandHook.example` build finishes, that script will execute.
@@ -1,146 +0,0 @@
## Declarative Projects

Hydra supports declaratively configuring a project's jobsets. This
configuration can be done statically, or generated by a build job.

> **Note**
>
> Hydra will treat the project's declarative input as a static definition
> if and only if the spec file contains a dictionary of dictionaries. If
> the value of any key in the spec is not a dictionary, it will treat the
> spec as a generated declarative spec.

### Static, Declarative Projects

Hydra supports declarative projects, where jobsets are configured from a
static JSON document in a repository.

To configure a static declarative project, take the following steps:

1. Create a Hydra-fetchable source like a Git repository or local path.

2. In that source, create a file called `spec.json`, and add the
   specification for all of the jobsets. Each key is a jobset name and
   each value is that jobset's specification. For example:

   ``` {.json}
   {
     "nixpkgs": {
       "enabled": 1,
       "hidden": false,
       "description": "Nixpkgs",
       "nixexprinput": "nixpkgs",
       "nixexprpath": "pkgs/top-level/release.nix",
       "checkinterval": 300,
       "schedulingshares": 100,
       "enableemail": false,
       "enable_dynamic_run_command": false,
       "emailoverride": "",
       "keepnr": 3,
       "inputs": {
         "nixpkgs": {
           "type": "git",
           "value": "git://github.com/NixOS/nixpkgs.git master",
           "emailresponsible": false
         }
       }
     },
     "nixos": {
       "enabled": 1,
       "hidden": false,
       "description": "NixOS: Small Evaluation",
       "nixexprinput": "nixpkgs",
       "nixexprpath": "nixos/release-small.nix",
       "checkinterval": 300,
       "schedulingshares": 100,
       "enableemail": false,
       "enable_dynamic_run_command": false,
       "emailoverride": "",
       "keepnr": 3,
       "inputs": {
         "nixpkgs": {
           "type": "git",
           "value": "git://github.com/NixOS/nixpkgs.git master",
           "emailresponsible": false
         }
       }
     }
   }
   ```

3. Create a new project, and set the project's declarative input type,
   declarative input value, and declarative spec file to point to the
   source and JSON file you created in step 2.

Hydra will create a special jobset named `.jobsets`. When the `.jobsets`
jobset is evaluated, this static specification will be used for
configuring the rest of the project's jobsets.

### Generated, Declarative Projects

Hydra also supports generated declarative projects, where jobsets are
configured automatically from specification files instead of being
managed through the UI. A jobset specification is a JSON object
containing the configuration of the jobset, for example:

``` {.json}
{
  "enabled": 1,
  "hidden": false,
  "description": "js",
  "nixexprinput": "src",
  "nixexprpath": "release.nix",
  "checkinterval": 300,
  "schedulingshares": 100,
  "enableemail": false,
  "enable_dynamic_run_command": false,
  "emailoverride": "",
  "keepnr": 3,
  "inputs": {
    "src": { "type": "git", "value": "git://github.com/shlevy/declarative-hydra-example.git", "emailresponsible": false },
    "nixpkgs": { "type": "git", "value": "git://github.com/NixOS/nixpkgs.git release-16.03", "emailresponsible": false }
  }
}
```

To configure a declarative project, take the following steps:

1. Create a jobset repository in the normal way (e.g. a Git repo with a
   `release.nix` file, any other needed helper files, and taking any
   kind of Hydra input), but without adding it to the UI. The Nix
   expression of this repository should contain a single job, named
   `jobsets`. The output of the `jobsets` job should be a JSON file
   containing an object of jobset specifications. Each member of the
   object will become a jobset of the project, configured by the
   corresponding jobset specification. A sketch of such an expression
   is shown after these steps.

2. In some Hydra-fetchable source (potentially, but not necessarily,
   the same repo you created in step 1), create a JSON file containing
   a jobset specification that points to the jobset repository you
   created in the first step, specifying any needed inputs
   (e.g. nixpkgs) as necessary.

3. In the project creation/edit page, set the declarative input type,
   declarative input value, and declarative spec file to point to the
   source and JSON file you created in step 2.

Hydra will create a special jobset named `.jobsets`, which whenever
evaluated will go through the steps above in reverse order:

1. Hydra will fetch the input specified by the declarative input type
   and value.

2. Hydra will use the configuration given in the declarative spec file
   as the jobset configuration for this evaluation. In addition to any
   inputs specified in the spec file, Hydra will also pass the
   `declInput` argument corresponding to the input fetched in step 1 and
   the `projectName` argument containing the project's name.

3. As normal, Hydra will build the jobs specified in the jobset
   repository, which in this case is the single `jobsets` job. When
   that job completes, Hydra will read the created jobset
   specifications and create corresponding jobsets in the project,
   disabling any jobsets that used to exist but are not present in the
   current spec.
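
To make step 1 concrete, the `release.nix` of the jobset repository
might look roughly like this (a hedged sketch; the jobset name, inputs,
and values are illustrative, and `declInput`/`projectName` are the
arguments Hydra passes as described above):

```nix
{ nixpkgs, declInput, projectName, ... }:
let pkgs = import nixpkgs {}; in
{
  # The single job whose output Hydra reads as the jobset specification.
  jobsets = pkgs.writeText "spec.json" (builtins.toJSON {
    main = {
      enabled = 1;
      hidden = false;
      description = "generated jobset for ${projectName}";
      nixexprinput = "src";
      nixexprpath = "release.nix";
      checkinterval = 300;
      schedulingshares = 100;
      enableemail = false;
      emailoverride = "";
      keepnr = 3;
      inputs = {
        src = {
          type = "git";
          value = "git://github.com/shlevy/declarative-hydra-example.git";
          emailresponsible = false;
        };
      };
    };
  });
}
```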
@@ -1,413 +0,0 @@
Creating and Managing Projects
==============================

Once Hydra is installed and running, the next step is to add projects to
the build farm. We follow the example of the [Patchelf
project](http://nixos.org/patchelf.html), a software tool written in C
and using the GNU Build System (GNU Autoconf and GNU Automake).

Log in to the web interface of your Hydra installation using the user
name and password you inserted in the database (by default, Hydra's web
server listens on [`localhost:3000`](http://localhost:3000/)). Then
follow the "Create Project" link to create a new project.

Project Information
-------------------

A project definition consists of some general information and a set of
job sets. The general information identifies a project, its owner, and
its current state of activity. Here's what we fill in for the patchelf
project:

    Identifier: patchelf

The *identifier* is the identity of the project. It is used in URLs and
in the names of build results.

The identifier should be a unique name (it is the primary database key
for the project table in the database). If you try to create a project
with an already existing identifier you'd get an error message from the
database. So try to create the project after entering just the general
information to figure out if you have chosen a unique name. Job sets can
be added once the project has been created.

    Display name: Patchelf

The *display name* is used in menus.

    Description: A tool for modifying ELF binaries

The *description* is used as short documentation of the nature of the
project.

    Owner: eelco

The *owner* of a project can create and edit job sets.

    Enabled: Yes

Only if the project is *enabled* are builds performed.

Once created there should be an entry for the project in the sidebar. Go
to the project page for the
[Patchelf](http://localhost:3000/project/patchelf) project.

Job Sets
--------

A project can consist of multiple *job sets* (hereafter *jobsets*),
separate tasks that can be built separately, but may depend on each
other (without cyclic dependencies, of course). Go to the
[Edit](http://localhost:3000/project/patchelf/edit) page of the Patchelf
project and "Add a new jobset" by providing the following
"Information":

    Identifier: trunk
    Description: Trunk
    Nix expression: release.nix in input patchelfSrc

This states that in order to build the `trunk` jobset, the Nix
expression in the file `release.nix`, which can be obtained from input
`patchelfSrc`, should be evaluated. (We'll have a look at `release.nix`
later.)

To realize a job we probably need a number of inputs, which can be
declared in the table below. As many inputs as required can be added.
For patchelf we declare the following inputs.

    patchelfSrc
        'Git checkout' https://github.com/NixOS/patchelf

    nixpkgs
        'Git checkout' https://github.com/NixOS/nixpkgs

    officialRelease
        Boolean false

    system
        String value "i686-linux"

Building Jobs
-------------

Build Recipes
-------------

Build jobs and *build recipes* for a jobset are specified in a text file
written in the [Nix language](http://nixos.org/nix/). The recipe is
actually called a *Nix expression* in Nix parlance. By convention this
file is often called `release.nix`.

The `release.nix` file is typically kept under version control, and the
repository that contains it is one of the build inputs of the
corresponding jobset--often called `hydraConfig` by convention. The
repository for that file and the actual file name are specified on the
web interface of Hydra under the `Setup` tab of the jobset's overview
page, under the `Nix expression` heading. See, for example, the [jobset
overview page](http://hydra.nixos.org/jobset/patchelf/trunk) of the
PatchELF project, and [the corresponding Nix
file](https://github.com/NixOS/patchelf/blob/master/release.nix).

Knowledge of the Nix language is recommended, but the example below
should already give a good idea of how it works:

    let
      pkgs = import <nixpkgs> {}; ①

      jobs = rec { ②

        tarball = ③
          pkgs.releaseTools.sourceTarball { ④
            name = "hello-tarball";
            src = <hello>; ⑤
            buildInputs = (with pkgs; [ gettext texLive texinfo ]);
          };

        build = ⑥
          { system ? builtins.currentSystem }: ⑦

          let pkgs = import <nixpkgs> { inherit system; }; in
          pkgs.releaseTools.nixBuild { ⑧
            name = "hello";
            src = jobs.tarball;
            configureFlags = [ "--disable-silent-rules" ];
          };
      };
    in
    jobs ⑨

This file shows what a `release.nix` file for
[GNU Hello](http://www.gnu.org/software/hello/) would look like.
GNU Hello is representative of many GNU and non-GNU free software
projects:

- it uses the GNU Build System, namely GNU Autoconf and GNU Automake;
  for users, this means it can be installed using the usual
  `./configure && make install` procedure;
- it uses Gettext for internationalization;
- it has a Texinfo manual, which can be rendered as PDF with TeX.

The file defines a jobset consisting of two jobs: `tarball` and
`build`. It contains the following elements (referenced from the figure
by numbers):

1. This defines a variable `pkgs` holding the set of packages provided
   by [Nixpkgs](http://nixos.org/nixpkgs/).

   Since `nixpkgs` appears in angle brackets, there must be a build
   input of that name in the Nix search path. In this case, the web
   interface should show a `nixpkgs` build input, which is a checkout
   of the Nixpkgs source code repository; Hydra then adds this and
   other build inputs to the Nix search path when evaluating
   `release.nix`.

2. This defines a variable holding the two Hydra jobs--an *attribute
   set* in Nix.

3. This is the definition of the first job, named `tarball`. The
   purpose of this job is to produce a usable source code tarball.

4. The `tarball` job calls the `sourceTarball` function, which
   (roughly) runs `autoreconf && ./configure && make dist` on the
   checkout. The `buildInputs` attribute specifies additional software
   dependencies for the job.

   > The package names used in `buildInputs`--e.g., `texLive`--are the
   > names of the *attributes* corresponding to these packages in
   > Nixpkgs, specifically in the
   > [`all-packages.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/all-packages.nix)
   > file. See the section entitled "Package Naming" in the Nixpkgs
   > manual for more information.

5. The `tarball` job expects a `hello` build input to be available in
   the Nix search path. Again, this input is passed by Hydra and is
   meant to be a checkout of GNU Hello's source code repository.

6. This is the definition of the `build` job, whose purpose is to build
   Hello from the tarball produced above.

7. The `build` function takes one parameter, `system`, which should be
   a string defining the Nix system type--e.g., `"x86_64-linux"`.
   Additionally, it refers to `jobs.tarball`, seen above.

   Hydra inspects the formal argument list of the function (here, the
   `system` argument) and passes it the corresponding parameter
   specified as a build input on Hydra's web interface. Here, `system`
   is passed by Hydra when it calls `build`. Thus, it must be defined
   as a build input of type string in Hydra, which could take one of
   several values.

   The question mark after `system` defines the default value for this
   argument, and is only useful when debugging locally.

8. The `build` job calls the `nixBuild` function, which unpacks the
   tarball, then runs `./configure && make && make check &&
   make install`.

9. Finally, the set of jobs is returned to Hydra, as a Nix attribute
   set.

Building from the Command Line
------------------------------

It is often useful to test a build recipe, for instance before it is
actually used by Hydra, when testing changes, or when debugging a build
issue. Since build recipes for Hydra jobsets are just plain Nix
expressions, they can be evaluated using the standard Nix tools.

To evaluate the `tarball` job of the above example, just run:

```console
$ nix-build release.nix -A tarball
```

However, doing this with the example as is will probably yield an error
like this:

    error: user-thrown exception: file `hello' was not found in the Nix search path (add it using $NIX_PATH or -I)

The error is self-explanatory. Assuming `$HOME/src/hello` points to a
checkout of Hello, this can be fixed as follows:

```console
$ nix-build -I ~/src release.nix -A tarball
```

Similarly, the `build` job can be evaluated:

```console
$ nix-build -I ~/src release.nix -A build
```

The `build` job reuses the result of the `tarball` job, rebuilding it
only if it needs to.
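
Since the `build` function takes a `system` argument, a particular
platform can also be requested explicitly (a sketch; `x86_64-linux` is
just an example value):

```console
$ nix-build -I ~/src release.nix -A build --argstr system x86_64-linux
```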

Adding More Jobs
----------------

The example illustrates how to write the most basic
jobs, `tarball` and `build`. In practice, much more can be done by using
features readily provided by Nixpkgs or by creating new jobs as
customizations of existing jobs.

For instance, a test coverage report for projects compiled with GCC can
be automatically generated using the `coverageAnalysis` function
provided by Nixpkgs instead of `nixBuild`. Back to our GNU Hello
example, we can define a `coverage` job that produces an HTML code
coverage report directly readable from the corresponding Hydra build
page:

    coverage =
      { system ? builtins.currentSystem }:

      let pkgs = import nixpkgs { inherit system; }; in
      pkgs.releaseTools.coverageAnalysis {
        name = "hello";
        src = jobs.tarball;
        configureFlags = [ "--disable-silent-rules" ];
      };

As can be seen, the only difference compared to `build` is the use of
`coverageAnalysis`.

Nixpkgs provides many more build tools, including the ability to run
builds in virtual machines, which can themselves run another GNU/Linux
distribution, which allows for the creation of packages for these
distributions. Please see [the `pkgs/build-support/release`
directory](https://github.com/NixOS/nixpkgs/tree/master/pkgs/build-support/release)
of Nixpkgs for more. The NixOS manual also contains information about
whole-system testing in virtual machines.

Now, assume we want to build Hello with an old version of GCC, and with
different `configure` flags. A new `build_exotic` job can be written
that simply *overrides* the relevant arguments passed to `nixBuild`:

    build_exotic =
      { system ? builtins.currentSystem }:

      let
        pkgs = import nixpkgs { inherit system; };
        build = jobs.build { inherit system; };
      in
      pkgs.lib.overrideDerivation build (attrs: {
        buildInputs = [ pkgs.gcc33 ];
        preConfigure = "gcc --version";
        configureFlags =
          attrs.configureFlags ++ [ "--disable-nls" ];
      });

The `build_exotic` job reuses `build` and overrides some of its
arguments: it adds a dependency on GCC 3.3, a pre-configure phase that
runs `gcc --version`, and adds the `--disable-nls` configure flag.

This customization mechanism is very powerful. For instance, it can be
used to change the way Hello and *all* its dependencies--including the C
library and compiler used to build it--are built. See the Nixpkgs manual
for more.

Declarative Projects
--------------------

See the [Declarative Projects chapter](./plugins/declarative-projects.md).

Email Notifications
-------------------

Hydra can send email notifications when the status of a build changes.
This provides immediate feedback to maintainers or committers when a
change causes build failures.

The feature can be turned on by adding the following line to `hydra.conf`:

``` conf
email_notification = 1
```

By default, Hydra only sends email notifications if a previously successful
build starts to fail. In order to force Hydra to send an email for each build
(including e.g. successful or cancelled ones), the environment variable
`HYDRA_FORCE_SEND_MAIL` can be declared:

``` nix
services.hydra-dev.extraEnv.HYDRA_FORCE_SEND_MAIL = "1";
```

SASL authentication for the email address that's used to send notifications
can be configured like this:

``` conf
EMAIL_SENDER_TRANSPORT_sasl_username=hydra@example.org
EMAIL_SENDER_TRANSPORT_sasl_password=verysecret
EMAIL_SENDER_TRANSPORT_port=587
EMAIL_SENDER_TRANSPORT_ssl=starttls
```

Further information about these environment variables can be found at the
[MetaCPAN documentation of `Email::Sender::Manual::QuickStart`](https://metacpan.org/pod/Email::Sender::Manual::QuickStart#specifying-transport-in-the-environment).

It's recommended not to put this in `services.hydra-dev.extraEnv` as this would
leak the secrets into the Nix store. Instead, it should be written into an
environment file and configured like this:

``` nix
{ systemd.services.hydra-notify = {
    serviceConfig.EnvironmentFile = "/etc/secrets/hydra-mail-cfg";
  };
}
```

The simplest approach to enable email notifications is to use the ssmtp
package, which simply hands the emails off to another SMTP server. For
details on how to configure ssmtp, see the documentation for the
`networking.defaultMailServer` option. To use ssmtp for the Hydra email
notifications, add it to the path option of the Hydra services in your
`/etc/nixos/configuration.nix` file:

    systemd.services.hydra-queue-runner.path = [ pkgs.ssmtp ];
    systemd.services.hydra-server.path = [ pkgs.ssmtp ];

Gitea Integration
-----------------

Hydra can notify Git servers (such as [GitLab](https://gitlab.com/), [GitHub](https://github.com)
or [Gitea](https://gitea.io/en-us/)) about the result of a build from a Git checkout.

This section describes how it can be implemented for `gitea`, but the approach for `gitlab` is
analogous:

* [Obtain an API token for your user](https://docs.gitea.io/en-us/api-usage/#authentication)
* Add it to a file that only users in the hydra group can read (see
  [including files](configuration.md#including-files) for more information):
  ```
  <gitea_authorization>
  your_username=your_token
  </gitea_authorization>
  ```

* Include the file in your `hydra.conf` like this:
  ``` nix
  {
    services.hydra-dev.extraConfig = ''
      Include /path/to/secret/file
    '';
  }
  ```

* For a jobset with a `Git`-input which points to a `gitea`-instance, add the following
  additional inputs:

  | Type           | Name                | Value                              |
  | -------------- | ------------------- | ---------------------------------- |
  | `String value` | `gitea_repo_name`   | *Name of the repository to build*  |
  | `String value` | `gitea_repo_owner`  | *Owner of the repository*          |
  | `String value` | `gitea_status_repo` | *Name of the `Git checkout` input* |
  | `String value` | `gitea_http_url`    | *Public URL of `gitea`*, optional  |

Content-addressed derivations
-----------------------------

Hydra can, to a certain extent, use the [`ca-derivations` experimental Nix feature](https://github.com/NixOS/rfcs/pull/62).
To use it, make sure that the Nix version you use is at least as recent as the one used in Hydra's flake.

Be warned that this support is still highly experimental, and anything beyond the basic functionality might be broken.
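
On the Nix side, this typically means enabling the experimental feature in `nix.conf` (a sketch; consult the Nix manual for the authoritative setting):

```conf
experimental-features = ca-derivations
```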
@@ -1,168 +0,0 @@
# Webhook Authentication Migration Guide

This guide helps Hydra administrators migrate from unauthenticated webhooks to authenticated webhooks to secure their Hydra instances against unauthorized job evaluations.

## Why Migrate?

Currently, Hydra's webhook endpoints (`/api/push-github` and `/api/push-gitea`) accept any POST request without authentication. This vulnerability allows:
- Anyone to trigger expensive job evaluations
- Potential denial of service through repeated requests
- Manipulation of build timing and scheduling

## Step-by-Step Migration for NixOS

### 1. Create Webhook Configuration

Create a webhook secrets configuration file with the generated secrets:

```bash
# Create the secrets directory if it does not exist yet
mkdir -p /var/lib/hydra/secrets

# Create the secrets configuration file with inline secret generation
cat > /var/lib/hydra/secrets/webhook-secrets.conf <<EOF
<github>
  secret = $(openssl rand -hex 32)
</github>
<gitea>
  secret = $(openssl rand -hex 32)
</gitea>
EOF

# Set secure permissions
chmod 0440 /var/lib/hydra/secrets/webhook-secrets.conf
chown hydra:hydra /var/lib/hydra/secrets/webhook-secrets.conf
```

**Important**: Save the generated secrets to configure them in GitHub/Gitea later. You can view them with:
```bash
cat /var/lib/hydra/secrets/webhook-secrets.conf
```

Then update your NixOS configuration to include the webhook configuration:

```nix
{
  services.hydra-dev = {
    enable = true;
    hydraURL = "https://hydra.example.com";
    notificationSender = "hydra@example.com";

    extraConfig = ''
      <webhooks>
        Include /var/lib/hydra/secrets/webhook-secrets.conf
      </webhooks>
    '';
  };
}
```

For multiple secrets (useful for rotation or multiple environments), update your `webhook-secrets.conf`:

```apache
<github>
  secret = your-github-webhook-secret-prod
  secret = your-github-webhook-secret-staging
</github>
<gitea>
  secret = your-gitea-webhook-secret
</gitea>
```

### 2. Deploy Configuration

Apply the NixOS configuration:

```bash
nixos-rebuild switch
```

This will automatically restart the Hydra services with the new configuration.

### 3. Verify Configuration

Check Hydra's logs to ensure the secrets were loaded successfully:

```bash
journalctl -u hydra-server | grep -i webhook
```

You should not see warnings about webhook authentication not being configured.

### 4. Update Your Webhooks

#### GitHub
1. Navigate to your repository settings: `https://github.com/<owner>/<repo>/settings/hooks`
2. Edit your existing Hydra webhook
3. In the "Secret" field, paste the GitHub secret from `/var/lib/hydra/secrets/webhook-secrets.conf`
4. Click "Update webhook"
5. GitHub will send a ping event to verify the configuration

#### Gitea
1. Navigate to your repository webhook settings
2. Edit your existing Hydra webhook
3. In the "Secret" field, paste the Gitea secret from `/var/lib/hydra/secrets/webhook-secrets.conf`
4. Click "Update Webhook"
5. Use the "Test Delivery" button to verify the configuration

### 5. Test the Configuration

After updating each webhook:
1. Make a test commit to trigger the webhook
2. Check Hydra's logs for successful authentication
3. Verify the evaluation was triggered in Hydra's web interface

## Troubleshooting

### 401 Unauthorized Errors

If webhooks start failing with 401 errors:
- Verify the secret in the Git forge matches the configured secret exactly
- Check file permissions: `ls -la /var/lib/hydra/secrets/`
- Ensure there is no extra whitespace around the secrets
- Check Hydra's logs for specific error messages

### Webhook Still Unauthenticated

If you see warnings about unauthenticated webhooks after configuration:
- Verify the configuration syntax in your NixOS module
- Ensure the NixOS configuration was successfully applied
- Check that the `webhook-secrets.conf` file exists and is readable by the Hydra user
- Verify the Include path is correct in your `hydra.conf`
- Check the syntax of your `webhook-secrets.conf` file

### Testing Without a Git Forge

You can test webhook authentication using curl:

```bash
# Extract the GitHub secret from the shared configuration file
SECRET=$(awk '/<github>/{f=1} /<\/github>/{f=0} f && $1=="secret"{print $3; exit}' /var/lib/hydra/secrets/webhook-secrets.conf)

# Create test payload
PAYLOAD='{"ref":"refs/heads/main","repository":{"clone_url":"https://github.com/test/repo.git"}}'

# Calculate signature
SIGNATURE="sha256=$(echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$SECRET" | cut -d' ' -f2)"

# Send authenticated request
curl -X POST https://your-hydra/api/push-github \
  -H "Content-Type: application/json" \
  -H "X-Hub-Signature-256: $SIGNATURE" \
  -d "$PAYLOAD"
```

For Gitea (no prefix in the signature):
```bash
# Extract the Gitea secret from the shared configuration file
SECRET=$(awk '/<gitea>/{f=1} /<\/gitea>/{f=0} f && $1=="secret"{print $3; exit}' /var/lib/hydra/secrets/webhook-secrets.conf)

# Create test payload
PAYLOAD='{"ref":"refs/heads/main","repository":{"clone_url":"https://gitea.example.com/test/repo.git"}}'

# Calculate signature
SIGNATURE=$(echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$SECRET" | cut -d' ' -f2)

# Send authenticated request
curl -X POST https://your-hydra/api/push-gitea \
  -H "Content-Type: application/json" \
  -H "X-Gitea-Signature: $SIGNATURE" \
  -d "$PAYLOAD"
```
@@ -1,101 +0,0 @@
|
||||
# Webhooks
|
||||
|
||||
Hydra can be notified by github or gitea with webhooks to trigger a new evaluation when a
|
||||
jobset has a github repo in its input.
|
||||
|
||||
## Webhook Authentication
|
||||
|
||||
Hydra supports webhook signature verification for both GitHub and Gitea using HMAC-SHA256. This ensures that webhook
|
||||
requests are coming from your configured Git forge and haven't been tampered with.
|
||||
|
||||
### Configuring Webhook Authentication
|
||||
|
||||
1. **Create webhook configuration**: Generate and store webhook secrets securely:
|
||||
```bash
|
||||
# Create directory and generate secrets in one step
|
||||
mkdir -p /var/lib/hydra/secrets
|
||||
cat > /var/lib/hydra/secrets/webhook-secrets.conf <<EOF
|
||||
<github>
|
||||
secret = $(openssl rand -hex 32)
|
||||
</github>
|
||||
<gitea>
|
||||
secret = $(openssl rand -hex 32)
|
||||
</gitea>
|
||||
EOF
|
||||
|
||||
# Set secure permissions
|
||||
chmod 0600 /var/lib/hydra/secrets/webhook-secrets.conf
|
||||
chown hydra:hydra /var/lib/hydra/secrets/webhook-secrets.conf
|
||||
```

2. **Configure Hydra**: Add the following to your `hydra.conf`:
```apache
<webhooks>
  Include /var/lib/hydra/secrets/webhook-secrets.conf
</webhooks>
```

3. **Configure your Git forge**: View the generated secrets and configure them in GitHub/Gitea:
```bash
grep "secret =" /var/lib/hydra/secrets/webhook-secrets.conf
```

### Multiple Secrets Support

Hydra supports configuring multiple secrets for each platform, which is useful for:
- Zero-downtime secret rotation
- Supporting multiple environments (production/staging)
- Gradual migration of webhooks

To configure multiple secrets, use array syntax:
```apache
<github>
  secret = current-webhook-secret
  secret = previous-webhook-secret
</github>
```
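
As a sketch of how a zero-downtime rotation could look on the Hydra host (the `sed` edit below is an assumption for illustration; adapt it to however you manage this file):

```bash
# Hypothetical rotation helper: add a new GitHub secret while keeping
# the old one valid until the forge has been switched over.
CONF=/var/lib/hydra/secrets/webhook-secrets.conf
NEW_SECRET=$(openssl rand -hex 32)

# Append the new secret inside the <github> block; the previous secret
# stays in place, so deliveries signed with either secret still verify.
sed -i "/<github>/a secret = $NEW_SECRET" "$CONF"

echo "New GitHub webhook secret: $NEW_SECRET"
# Next: paste the new secret into the GitHub webhook settings, confirm
# deliveries succeed, then remove the old "secret =" line from the file.
```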

## GitHub

To set up a webhook for a GitHub repository, go to `https://github.com/<yourhandle>/<yourrepo>/settings`
and in the `Webhooks` tab click on `Add webhook`.

- In `Payload URL` fill in `https://<your-hydra-domain>/api/push-github`.
- In `Content type` switch to `application/json`.
- In the `Secret` field, enter the content of your GitHub webhook secret file (if authentication is configured).
- For `Which events would you like to trigger this webhook?`, keep the default `Just the push event.`.

Then add the hook with `Add webhook`.

### Verifying GitHub Webhook Security

After configuration, GitHub will send webhook requests with an `X-Hub-Signature-256` header containing the HMAC-SHA256
signature of the request body. Hydra verifies that this signature matches one of the configured secrets.

## Gitea

To set up a webhook for a Gitea repository, go to the settings of the repository in your Gitea instance,
and in the `Webhooks` tab click on `Add Webhook` and choose `Gitea` in the drop-down.

- In `Target URL` fill in `https://<your-hydra-domain>/api/push-gitea`.
- Keep HTTP method `POST`, POST Content Type `application/json`, and Trigger On `Push Events`.
- In the `Secret` field, enter the content of your Gitea webhook secret file (if authentication is configured).
- Change the branch filter to match the Git branch Hydra builds.

Then add the hook with `Add webhook`.

### Verifying Gitea Webhook Security

After configuration, Gitea will send webhook requests with an `X-Gitea-Signature` header containing the HMAC-SHA256
signature of the request body. Hydra verifies that this signature matches one of the configured secrets.

## Troubleshooting

If you receive 401 Unauthorized errors:
- Verify the webhook secret in your Git forge matches the content of the secret file exactly
- Check that the secret file has proper permissions (should be 0600)
- Look at Hydra's logs for specific error messages
- Ensure the correct signature header is being sent by your Git forge

If you see warnings about webhook authentication not being configured:
- Configure webhook authentication as described above to secure your endpoints
288
doc/manual/style.css
Normal file
288
doc/manual/style.css
Normal file
@@ -0,0 +1,288 @@
/* Copied from http://bakefile.sourceforge.net/, which appears
   licensed under the GNU GPL. */


/***************************************************************************
                         Basic headers and text:
 ***************************************************************************/

body
{
  font-family: sans-serif;
  background: white;
  margin: 2em 1em 2em 1em;
}

h1,h2,h3
{
  color: #005aa0;
  text-align: left;
}

h1 /* title */
{
  font-size: 200%;
}

h2 /* chapters, appendices, subtitle */
{
  font-size: 180%;
}

/* Extra space between chapters, appendices. */
div.chapter > div.titlepage h2, div.appendix > div.titlepage h2
{
  margin-top: 1.5em;
}

div.section > div.titlepage h2 /* sections */
{
  font-size: 150%;
  margin-top: 1.5em;
}

h3 /* subsections */
{
  font-size: 125%;
}

div.simplesect h2
{
  font-size: 110%;
}

div.appendix h3
{
  font-size: 150%;
  margin-top: 1.5em;
}

div.refnamediv h2, div.refsynopsisdiv h2, div.refsection h2 /* refentry parts */
{
  margin-top: 1.4em;
  font-size: 125%;
}

div.refsection h3
{
  font-size: 110%;
}


/***************************************************************************
                                 Examples:
 ***************************************************************************/

div.example
{
  border: 1px solid #6185a0;
  padding: 6px 6px;
  margin-left: 1.5em;
  margin-right: 1.5em;
  background: #f4f4f8;
}

div.example p.title
{
  margin-top: 0em;
}


/***************************************************************************
                               Screen dumps:
 ***************************************************************************/

pre.screen, pre.programlisting
{
  border: 1px solid #6185a0;
  padding: 3px 3px;
  margin-left: 1.5em;
  margin-right: 1.5em;
  color: #600000;
  background: #f4f4f8;
  font-family: monospace;
  /* font-size: 90%; */
}

div.example pre.programlisting
{
  border: 0px;
  padding: 0 0;
  margin: 0 0 0 0;
}


/***************************************************************************
                          Notes, warnings etc:
 ***************************************************************************/

.note, .warning
{
  border: 1px solid #6185a0;
  padding: 3px 3px;
  margin-left: 1.5em;
  margin-right: 1.5em;
  margin-bottom: 1em;
  padding: 0.3em 0.3em 0.3em 0.3em;
  background: #fffff5;
}

div.note, div.warning
{
  font-style: italic;
}

div.note h3, div.warning h3
{
  color: red;
  font-size: 100%;
  // margin: 0 0 0 0;
  padding-right: 0.5em;
  display: inline;
}

div.note p, div.warning p
{
  margin-bottom: 0em;
}

div.note h3 + p, div.warning h3 + p
{
  display: inline;
}

div.note h3
{
  color: blue;
  font-size: 100%;
}

div.navfooter *
{
  font-size: 90%;
}


/***************************************************************************
                        Links colors and highlighting:
 ***************************************************************************/

a:link { color: #0048b3; }
a:visited { color: #002a6a; }
a:hover { background: #ffffcd; }


/***************************************************************************
                              Table of contents:
 ***************************************************************************/

.toc
{
  font-size: 90%;
}


/***************************************************************************
                               Special elements:
 ***************************************************************************/

tt, code
{
  color: #400000;
}

.term
{
  font-weight: bold;
}

div.variablelist dd p, div.glosslist dd p
{
  margin-top: 0em;
}

div.variablelist dd, div.glosslist dd
{
  margin-left: 1.5em;
}

div.glosslist dt
{
  font-style: italic;
}

.default
{
  font-style: italic;
}

.availability
{
  font-style: italic;
}

.varname
{
  color: #400000;
}

div.informaltable table
{
  border: 1px solid #6185a0;
  width: 100%;
}

div.informaltable td
{
  border: 0;
  padding: 5px;
}

div.informaltable td.default
{
  text-align: right;
}

div.informaltable th
{
  text-align: left;
  color: #005aa0;
  border: 0;
  padding: 5px;
  background: #fffff5;
  font-weight: normal;
  font-style: italic;
}

td.varname, td.tagname, td.paramname
{
  font-weight: bold;
  vertical-align: top;
}

div.epigraph
{
  font-style: italic;
  text-align: right;
}

table.productionset table.productionset
{
  font-family: monospace;
}

strong.command
{
  // font-family: monospace;
  // font-style: italic;
  // font-weight: normal;
  color: #400000;
}

div.calloutlist td
{
  padding-bottom: 1em;
}
@@ -1,13 +0,0 @@
#
# jobset example file. This file can be referenced as Nix expression
# in a jobset configuration along with inputs for nixpkgs and the
# repository containing this file.
#
{ ... }:
let
  # <nixpkgs> is set to the value designated by the nixpkgs input of the
  # jobset configuration.
  pkgs = (import <nixpkgs> {});
in {
  hello = pkgs.hello;
}
63
flake.lock
generated
63
flake.lock
generated
@@ -1,63 +0,0 @@
{
  "nodes": {
    "nix": {
      "flake": false,
      "locked": {
        "lastModified": 1760573252,
        "narHash": "sha256-mcvNeNdJP5R7huOc8Neg0qZESx/0DMg8Fq6lsdx0x8U=",
        "owner": "NixOS",
        "repo": "nix",
        "rev": "3c39583e5512729f9c5a44c3b03b6467a2acd963",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "2.32-maintenance",
        "repo": "nix",
        "type": "github"
      }
    },
    "nix-eval-jobs": {
      "flake": false,
      "locked": {
        "lastModified": 1760478325,
        "narHash": "sha256-hA+NOH8KDcsuvH7vJqSwk74PyZP3MtvI/l+CggZcnTc=",
        "owner": "nix-community",
        "repo": "nix-eval-jobs",
        "rev": "daa42f9e9c84aeff1e325dd50fda321f53dfd02c",
        "type": "github"
      },
      "original": {
        "owner": "nix-community",
        "ref": "v2.32.1",
        "repo": "nix-eval-jobs",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1759652726,
        "narHash": "sha256-2VjnimOYDRb3DZHyQ2WH2KCouFqYm9h0Rr007Al/WSA=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "06b2985f0cc9eb4318bf607168f4b15af1e5e81d",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-25.05-small",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "nix": "nix",
        "nix-eval-jobs": "nix-eval-jobs",
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}
140
flake.nix
140
flake.nix
@@ -1,140 +0,0 @@
{
  description = "A Nix-based continuous build system";

  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05-small";

  inputs.nix = {
    url = "github:NixOS/nix/2.32-maintenance";
    # We want to control the deps precisely
    flake = false;
  };

  inputs.nix-eval-jobs = {
    url = "github:nix-community/nix-eval-jobs/v2.32.1";
    # We want to control the deps precisely
    flake = false;
  };

  outputs = { self, nixpkgs, nix, nix-eval-jobs, ... }:
    let
      systems = [ "x86_64-linux" "aarch64-linux" ];
      forEachSystem = nixpkgs.lib.genAttrs systems;
    in
    rec {

      # A Nixpkgs overlay that provides a 'hydra' package.
      overlays.default = final: prev: {
        nixDependenciesForHydra = final.lib.makeScope final.newScope
          (import (nix + "/packaging/dependencies.nix") {
            pkgs = final;
            inherit (final) stdenv;
            inputs = {};
          });
        nixComponentsForHydra = final.lib.makeScope final.nixDependenciesForHydra.newScope
          (import (nix + "/packaging/components.nix") {
            officialRelease = true;
            inherit (final) lib;
            pkgs = final;
            src = nix;
            maintainers = [ ];
          });
        nix-eval-jobs = final.callPackage nix-eval-jobs {
          nixComponents = final.nixComponentsForHydra;
        };
        hydra = final.callPackage ./package.nix {
          inherit (final.lib) fileset;
          rawSrc = self;
          nixComponents = final.nixComponentsForHydra;
        };
      };

      hydraJobs = {
        build = forEachSystem (system: packages.${system}.hydra);

        buildNoTests = forEachSystem (system:
          packages.${system}.hydra.overrideAttrs (_: {
            doCheck = false;
          })
        );

        manual = forEachSystem (system: let
          pkgs = nixpkgs.legacyPackages.${system};
          hydra = self.packages.${pkgs.hostPlatform.system}.hydra;
        in
          pkgs.runCommand "hydra-manual-${hydra.version}" { }
            ''
              mkdir -p $out/share
              cp -prvd ${hydra.doc}/share/doc $out/share/

              mkdir $out/nix-support
              echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
            '');

        tests = import ./nixos-tests.nix {
          inherit forEachSystem nixpkgs nixosModules;
        };

        container = nixosConfigurations.container.config.system.build.toplevel;
      };

      checks = forEachSystem (system: {
        build = hydraJobs.build.${system};
        install = hydraJobs.tests.install.${system};
        validate-openapi = hydraJobs.tests.validate-openapi.${system};
      });

      packages = forEachSystem (system: let
        inherit (nixpkgs) lib;
        pkgs = nixpkgs.legacyPackages.${system};
        nixDependencies = lib.makeScope pkgs.newScope
          (import (nix + "/packaging/dependencies.nix") {
            inherit pkgs;
            inherit (pkgs) stdenv;
            inputs = {};
          });
        nixComponents = lib.makeScope nixDependencies.newScope
          (import (nix + "/packaging/components.nix") {
            officialRelease = true;
            inherit lib pkgs;
            src = nix;
            maintainers = [ ];
          });
      in {
        nix-eval-jobs = pkgs.callPackage nix-eval-jobs {
          inherit nixComponents;
        };
        hydra = pkgs.callPackage ./package.nix {
          inherit (nixpkgs.lib) fileset;
          inherit nixComponents;
          inherit (self.packages.${system}) nix-eval-jobs;
          rawSrc = self;
        };
        default = self.packages.${system}.hydra;
      });

      nixosModules = import ./nixos-modules {
        inherit self;
      };

      nixosConfigurations.container = nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";
        modules =
          [
            self.nixosModules.hydra
            self.nixosModules.hydraTest
            self.nixosModules.hydraProxy
            {
              system.configurationRevision = self.lastModifiedDate;

              boot.isContainer = true;
              networking.useDHCP = false;
              networking.firewall.allowedTCPPorts = [ 80 ];
              networking.hostName = "hydra";

              services.hydra-dev.useSubstitutes = true;
            }
          ];
      };

    };
}
@@ -1,8 +0,0 @@
#!/bin/sh

export PATH=$(pwd)/src/script:$PATH

# wait for hydra-server to listen
while ! nc -z localhost 63333; do sleep 1; done

HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-evaluator
@@ -1,33 +0,0 @@
#!/bin/sh

export PATH=$(pwd)/src/script:$PATH

# wait for postgresql to listen
while ! pg_isready -h $(pwd)/.hydra-data/postgres -p 64444; do sleep 1; done

createdb -h $(pwd)/.hydra-data/postgres -p 64444 hydra

# create a db for the default user. Not sure why, but
# the terminal is otherwise spammed with:
#
#   FATAL: database "USERNAME" does not exist
createdb -h $(pwd)/.hydra-data/postgres -p 64444 "$(whoami)" || true

hydra-init
hydra-create-user alice --password foobar --role admin

if [ ! -f ./.hydra-data/hydra.conf ]; then
    echo "Creating a default hydra.conf"
    cat << EOF > .hydra-data/hydra.conf
# test-time instances likely don't want to bootstrap nixpkgs from scratch
use-substitutes = true

<hydra_notify>
  <prometheus>
    listen_address = 127.0.0.1
    port = 64445
  </prometheus>
</hydra_notify>
EOF
fi
HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-dev-server --port 63333 --restart --debug
@@ -1,6 +0,0 @@
#!/bin/sh

mdbook serve \
  --port 63332 \
  --dest-dir ./.hydra-data/manual \
  ./doc/manual/
@@ -1,8 +0,0 @@
#!/bin/sh

export PATH=$(pwd)/src/script:$PATH

# wait for hydra-server to listen
while ! nc -z localhost 63333; do sleep 1; done

HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-notify
@@ -1,4 +0,0 @@
#!/bin/sh

initdb ./.hydra-data/postgres
exec postgres -D ./.hydra-data/postgres -k $(pwd)/.hydra-data/postgres -p 64444
@@ -1,6 +0,0 @@
#!/bin/sh

# wait until hydra is listening on port 63333
while ! nc -z localhost 63333; do sleep 1; done

NIX_REMOTE_SYSTEMS="" HYDRA_CONFIG=$(pwd)/.hydra-data/hydra.conf exec hydra-queue-runner
1142
hydra-api.yaml
1142
hydra-api.yaml
File diff suppressed because it is too large
229
hydra-module.nix
Normal file
229
hydra-module.nix
Normal file
@@ -0,0 +1,229 @@
{ config, pkgs, ... }:

with pkgs.lib;

let
  cfg = config.services.hydra;

  hydraConf = pkgs.writeScript "hydra.conf"
    ''
      using_frontend_proxy 1
      base_uri ${cfg.hydraURL}
      notification_sender ${cfg.notificationSender}
      max_servers 25
    '';

  env = ''export NIX_REMOTE=daemon ''
    + ''HYDRA_DBI="${cfg.dbi}" ''
    + ''HYDRA_CONFIG=${cfg.baseDir}/data/hydra.conf ''
    + ''HYDRA_DATA=${cfg.baseDir}/data ''
    + ''HYDRA_PORT="${toString cfg.port}" ''
    + (if cfg.logo != null
       then ''HYDRA_LOGO="${cfg.logo}" ''
       else "")
    + ''HYDRA_TRACKER="${cfg.tracker}" ;'';

in

{
  ###### interface
  options = {
    services.hydra = rec {

      enable = mkOption {
        default = false;
        description = ''
          Whether to run Hydra services.
        '';
      };

      baseDir = mkOption {
        default = "/home/${user.default}";
        description = ''
          The directory holding configuration, logs and temporary files.
        '';
      };

      user = mkOption {
        default = "hydra";
        description = ''
          The user the Hydra services should run as.
        '';
      };

      dbi = mkOption {
        default = "dbi:Pg:dbname=hydra;host=localhost;user=root;";
        example = "dbi:SQLite:/home/hydra/db/hydra.sqlite";
        description = ''
          The DBI string for Hydra database connection.
        '';
      };

      hydra = mkOption {
        default = pkgs.hydra;
        description = ''
          Location of hydra
        '';
      };

      hydraURL = mkOption {
        default = "http://hydra.nixos.org";
        description = ''
          The base URL for the Hydra webserver instance. Used for links in emails.
        '';
      };

      port = mkOption {
        default = 3000;
        description = ''
          TCP port the web server should listen to.
        '';
      };

      minimumDiskFree = mkOption {
        default = 5;
        description = ''
          Threshold of minimum disk space (G) to determine if queue runner should run or not.
        '';
      };

      minimumDiskFreeEvaluator = mkOption {
        default = 2;
        description = ''
          Threshold of minimum disk space (G) to determine if evaluator should run or not.
        '';
      };

      notificationSender = mkOption {
        default = "e.dolstra@tudelft.nl";
        description = ''
          Sender email address used for email notifications.
        '';
      };

      tracker = mkOption {
        default = "";
        description = ''
          Piece of HTML that is included on all pages.
        '';
      };

      logo = mkOption {
        default = null;
        description = ''
          File name of an alternate logo to be displayed on the web pages.
        '';
      };

      autoStart = mkOption {
        default = true;
        description = ''
          If hydra upstart jobs should start automatically.
        '';
      };

    };

  };


  ###### implementation

  config = mkIf cfg.enable {
    environment.systemPackages = [ cfg.hydra ];

    users.extraUsers = [
      { name = cfg.user;
        description = "Hydra";
        home = cfg.baseDir;
        createHome = true;
        useDefaultShell = true;
      }
    ];

    nix.gc.automatic = true;
    # $3 / $4 don't always work depending on length of device name
    nix.gc.options = ''--max-freed "$((200 * 1024**3 - 1024 * $(df /nix/store | tail -n 1 | awk '{ print $3 }')))"'';

    nix.extraOptions = ''
      gc-keep-outputs = true
      gc-keep-derivations = true

      # The default (`true') slows Nix down a lot since the build farm
      # has so many GC roots.
      gc-check-reachability = false

      # Hydra needs caching of build failures.
      build-cache-failure = true

      build-poll-interval = 10

      use-sqlite-wal = false
    '';

    jobs.hydra_init =
      { description = "hydra-init";
        startOn = "started network-interfaces";
        preStart = ''
          mkdir -p ${cfg.baseDir}/data
          chown ${cfg.user} ${cfg.baseDir}/data
          ln -sf ${hydraConf} ${cfg.baseDir}/data/hydra.conf
        '';
        exec = ''
          echo done
        '';
      };

    jobs.hydra_server =
      { description = "hydra-server";
        startOn = if cfg.autoStart then "started network-interfaces hydra-init" else "never";
        exec = ''
          ${pkgs.su}/bin/su - ${cfg.user} -c '${env} ${cfg.hydra}/bin/hydra-server > ${cfg.baseDir}/data/server.log 2>&1'
        '';
      };

    jobs.hydra_queue_runner =
      { description = "hydra-queue-runner";
        startOn = if cfg.autoStart then "started network-interfaces hydra-init" else "never";
        preStart = "${pkgs.su}/bin/su - ${cfg.user} -c '${env} ${cfg.hydra}/bin/hydra-queue-runner --unlock'";
        exec = ''
          ${pkgs.su}/bin/su - ${cfg.user} -c '${env} nice -n 8 ${cfg.hydra}/bin/hydra-queue-runner > ${cfg.baseDir}/data/queue-runner.log 2>&1'
        '';
      };

    jobs.hydra_evaluator =
      { description = "hydra-evaluator";
        startOn = if cfg.autoStart then "started network-interfaces hydra-init" else "never";
        exec = ''
          ${pkgs.su}/bin/su - ${cfg.user} -c '${env} nice -n 5 ${cfg.hydra}/bin/hydra-evaluator > ${cfg.baseDir}/data/evaluator.log 2>&1'
        '';
      };

    services.cron.systemCronJobs =
      let
        # If there is less than ... GiB of free disk space, stop the queue
        # to prevent builds from failing or aborting.
        checkSpace = pkgs.writeScript "hydra-check-space"
          ''
            #! /bin/sh
            if [ $(($(stat -f -c '%a' /nix/store) * $(stat -f -c '%S' /nix/store))) -lt $((${toString cfg.minimumDiskFree} * 1024**3)) ]; then
                stop hydra-queue-runner
            fi
            if [ $(($(stat -f -c '%a' /nix/store) * $(stat -f -c '%S' /nix/store))) -lt $((${toString cfg.minimumDiskFreeEvaluator} * 1024**3)) ]; then
                stop hydra-evaluator
            fi
          '';
        compressLogs = pkgs.writeScript "compress-logs" ''
          #! /bin/sh -e
          touch -d 'last month' r
          find /nix/var/log/nix/drvs -type f -a ! -newer r -name '*.drv' | xargs bzip2 -v
        '';
      in
      [ "*/5 * * * * root ${checkSpace} &> ${cfg.baseDir}/data/checkspace.log"
        "15 5 * * * root ${compressLogs} &> ${cfg.baseDir}/data/compress.log"
        "15 02 * * * ${cfg.user} ${env} ${cfg.hydra}/bin/hydra-update-gc-roots &> ${cfg.baseDir}/data/gc-roots.log"
      ];

  };
}
26
meson.build
26
meson.build
@@ -1,26 +0,0 @@
project('hydra', 'cpp',
  version: files('version.txt'),
  license: 'GPL-3.0',
  default_options: [
    'debug=true',
    'optimization=2',
    'cpp_std=c++23',
  ],
)

nix_util_dep = dependency('nix-util', required: true)
nix_store_dep = dependency('nix-store', required: true)
nix_main_dep = dependency('nix-main', required: true)

pqxx_dep = dependency('libpqxx', required: true)

prom_cpp_core_dep = dependency('prometheus-cpp-core', required: true)
prom_cpp_pull_dep = dependency('prometheus-cpp-pull', required: true)

mdbook = find_program('mdbook', native: true)
perl = find_program('perl', native: true)

subdir('doc/manual')
subdir('nixos-modules')
subdir('src')
subdir('t')
@@ -1,47 +0,0 @@
{ self }:

{
  hydra = { pkgs, lib, ... }: {
    _file = ./default.nix;
    imports = [ ./hydra.nix ];
    services.hydra-dev.package = lib.mkDefault self.packages.${pkgs.hostPlatform.system}.hydra;
  };

  hydraTest = { pkgs, ... }: {
    services.hydra-dev.enable = true;
    services.hydra-dev.hydraURL = "http://hydra.example.org";
    services.hydra-dev.notificationSender = "admin@hydra.example.org";

    systemd.services.hydra-send-stats.enable = false;

    services.postgresql.enable = true;

    # The following is to work around the following error from hydra-server:
    #   [error] Caught exception in engine "Cannot determine local time zone"
    time.timeZone = "UTC";

    nix.extraOptions = ''
      allowed-uris = https://github.com/
    '';
  };

  hydraProxy = {
    services.httpd = {
      enable = true;
      adminAddr = "hydra-admin@example.org";
      extraConfig = ''
        <Proxy *>
          Order deny,allow
          Allow from all
        </Proxy>

        ProxyRequests Off
        ProxyPreserveHost On
        ProxyPass /apache-errors !
        ErrorDocument 503 /apache-errors/503.html
        ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
        ProxyPassReverse / http://127.0.0.1:3000/
      '';
    };
  };
}
@@ -1,495 +0,0 @@
{ config, pkgs, lib ? pkgs.lib, ... }:

with lib;

let

  cfg = config.services.hydra-dev;

  baseDir = "/var/lib/hydra";

  hydraConf = pkgs.writeScript "hydra.conf" cfg.extraConfig;

  hydraEnv =
    { HYDRA_DBI = cfg.dbi;
      HYDRA_CONFIG = "${baseDir}/hydra.conf";
      HYDRA_DATA = "${baseDir}";
    };

  env =
    { NIX_REMOTE = "daemon";
      PGPASSFILE = "${baseDir}/pgpass";
      NIX_REMOTE_SYSTEMS = concatStringsSep ":" cfg.buildMachinesFiles;
    } // optionalAttrs (cfg.smtpHost != null) {
      EMAIL_SENDER_TRANSPORT = "SMTP";
      EMAIL_SENDER_TRANSPORT_host = cfg.smtpHost;
    } // hydraEnv // cfg.extraEnv;

  serverEnv = env //
    {
      COLUMNS = "80";
      PGPASSFILE = "${baseDir}/pgpass-www"; # grrr
      XDG_CACHE_HOME = "${baseDir}/www/.cache";
    } // (optionalAttrs cfg.debugServer { DBIC_TRACE = "1"; });

  localDB = "dbi:Pg:dbname=hydra;user=hydra;";

  haveLocalDB = cfg.dbi == localDB;

in

{
  ###### interface
  options = {

    services.hydra-dev = rec {

      enable = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Whether to run Hydra services.
        '';
      };

      dbi = mkOption {
        type = types.str;
        default = localDB;
        example = "dbi:Pg:dbname=hydra;host=postgres.example.org;user=foo;";
        description = ''
          The DBI string for Hydra database connection.

          NOTE: Attempts to set `application_name` will be overridden by
          `hydra-TYPE` (where TYPE is e.g. `evaluator`, `queue-runner`,
          etc.) in all hydra services to more easily distinguish where
          queries are coming from.
        '';
      };

      package = mkOption {
        type = types.path;
        description = "The Hydra package.";
      };

      hydraURL = mkOption {
        type = types.str;
        description = ''
          The base URL for the Hydra webserver instance. Used for links in emails.
        '';
      };

      listenHost = mkOption {
        type = types.str;
        default = "*";
        example = "localhost";
        description = ''
          The hostname or address to listen on or <literal>*</literal> to listen
          on all interfaces.
        '';
      };

      port = mkOption {
        type = types.int;
        default = 3000;
        description = ''
          TCP port the web server should listen to.
        '';
      };

      minimumDiskFree = mkOption {
        type = types.int;
        default = 0;
        description = ''
          Threshold of minimum disk space (GiB) to determine if the queue runner should run or not.
        '';
      };

      minimumDiskFreeEvaluator = mkOption {
        type = types.int;
        default = 0;
        description = ''
          Threshold of minimum disk space (GiB) to determine if the evaluator should run or not.
        '';
      };

      notificationSender = mkOption {
        type = types.str;
        description = ''
          Sender email address used for email notifications.
        '';
      };

      smtpHost = mkOption {
        type = types.nullOr types.str;
        default = null;
        example = ["localhost"];
        description = ''
          Hostname of the SMTP server to use to send email.
        '';
      };

      tracker = mkOption {
        type = types.str;
        default = "";
        description = ''
          Piece of HTML that is included on all pages.
        '';
      };

      logo = mkOption {
        type = types.nullOr types.path;
        default = null;
        description = ''
          Path to a file containing the logo of your Hydra instance.
        '';
      };

      debugServer = mkOption {
        type = types.bool;
        default = false;
        description = "Whether to run the server in debug mode.";
      };

      extraConfig = mkOption {
        type = types.lines;
        description = "Extra lines for the Hydra configuration.";
      };

      extraEnv = mkOption {
        type = types.attrsOf types.str;
        default = {};
        description = "Extra environment variables for Hydra.";
      };

      gcRootsDir = mkOption {
        type = types.path;
        default = "/nix/var/nix/gcroots/hydra";
        description = "Directory that holds Hydra garbage collector roots.";
      };

      buildMachinesFiles = mkOption {
        type = types.listOf types.path;
        default = optional (config.nix.buildMachines != []) "/etc/nix/machines";
        defaultText = literalExpression ''optional (config.nix.buildMachines != []) "/etc/nix/machines"'';
        example = [ "/etc/nix/machines" "/var/lib/hydra/provisioner/machines" ];
        description = "List of files containing build machines.";
      };

      useSubstitutes = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Whether to use binary caches for downloading store paths. Note that
          binary substitutions trigger (a potentially large number of) additional
          HTTP requests that slow down the queue monitor thread significantly.
          Also, this Hydra instance will serve those downloaded store paths to
          its users with its own signature attached as if it had built them
          itself, so don't enable this feature unless your active binary caches
          are absolutely trustworthy.
        '';
      };
    };

  };


  ###### implementation

  config = mkIf cfg.enable {

    systemd.tmpfiles.rules = [
      "d ${baseDir} 0750 hydra hydra"
    ];

    users.extraGroups.hydra = { };

    users.extraUsers.hydra =
      { description = "Hydra";
        group = "hydra";
        home = baseDir;
        isSystemUser = true;
        useDefaultShell = true;
      };

    users.extraUsers.hydra-queue-runner =
      { description = "Hydra queue runner";
        group = "hydra";
        useDefaultShell = true;
        isSystemUser = true;
        home = "${baseDir}/queue-runner"; # really only to keep SSH happy
      };

    users.extraUsers.hydra-www =
      { description = "Hydra web server";
        group = "hydra";
        isSystemUser = true;
        useDefaultShell = true;
      };

    nix.settings = {
      trusted-users = [ "hydra-queue-runner" ];
      keep-outputs = true;
      keep-derivations = true;
    };

    services.hydra-dev.extraConfig =
      ''
        using_frontend_proxy = 1
        base_uri = ${cfg.hydraURL}
        notification_sender = ${cfg.notificationSender}
        max_servers = 25
        compress_num_threads = 0
        ${optionalString (cfg.logo != null) ''
          hydra_logo = ${cfg.logo}
        ''}
        gc_roots_dir = ${cfg.gcRootsDir}
        use-substitutes = ${if cfg.useSubstitutes then "1" else "0"}

        ${optionalString (cfg.tracker != null) (let
          indentedTrackerData = lib.concatMapStringsSep "\n" (line: " ${line}") (lib.splitString "\n" cfg.tracker);
        in ''
          tracker = <<TRACKER
          ${indentedTrackerData}
          TRACKER
        '')}
      '';

    environment.systemPackages = [ cfg.package ];

    environment.variables = hydraEnv;

    systemd.services.hydra-init =
      { wantedBy = [ "multi-user.target" ];
        requires = optional haveLocalDB "postgresql.service";
        after = optional haveLocalDB "postgresql.service";
        environment = env // {
          HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init";
        };
        path = [ pkgs.util-linux ];
        preStart = ''
          ln -sf ${hydraConf} ${baseDir}/hydra.conf

          mkdir -m 0700 -p ${baseDir}/www
          chown hydra-www:hydra ${baseDir}/www

          mkdir -m 0700 -p ${baseDir}/queue-runner
          mkdir -m 0750 -p ${baseDir}/build-logs
          mkdir -m 0750 -p ${baseDir}/runcommand-logs
          chown hydra-queue-runner:hydra \
            ${baseDir}/queue-runner \
            ${baseDir}/build-logs \
            ${baseDir}/runcommand-logs

          ${optionalString haveLocalDB ''
            if ! [ -e ${baseDir}/.db-created ]; then
              runuser -u ${config.services.postgresql.superUser} -- ${config.services.postgresql.package}/bin/createuser hydra
              runuser -u ${config.services.postgresql.superUser} -- ${config.services.postgresql.package}/bin/createdb -O hydra hydra
              touch ${baseDir}/.db-created
            fi
            echo "create extension if not exists pg_trgm" | runuser -u ${config.services.postgresql.superUser} -- ${config.services.postgresql.package}/bin/psql hydra
          ''}

          if [ ! -e ${cfg.gcRootsDir} ]; then

            # Move legacy roots directory.
            if [ -e /nix/var/nix/gcroots/per-user/hydra/hydra-roots ]; then
              mv /nix/var/nix/gcroots/per-user/hydra/hydra-roots ${cfg.gcRootsDir}
            fi

            mkdir -p ${cfg.gcRootsDir}
          fi

          # Move legacy hydra-www roots.
          if [ -e /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots ]; then
            find /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots/ -type f \
              | xargs -r mv -f -t ${cfg.gcRootsDir}/
            rmdir /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots
          fi

          chown hydra:hydra ${cfg.gcRootsDir}
          chmod 2775 ${cfg.gcRootsDir}
        '';
        serviceConfig.ExecStart = "${cfg.package}/bin/hydra-init";
        serviceConfig.PermissionsStartOnly = true;
        serviceConfig.User = "hydra";
        serviceConfig.Type = "oneshot";
        serviceConfig.RemainAfterExit = true;
      };

    systemd.services.hydra-server =
      { wantedBy = [ "multi-user.target" ];
        requires = [ "hydra-init.service" ];
        after = [ "hydra-init.service" ];
        environment = serverEnv // {
          HYDRA_DBI = "${serverEnv.HYDRA_DBI};application_name=hydra-server";
        };
        restartTriggers = [ hydraConf ];
        serviceConfig =
          { ExecStart =
              "@${cfg.package}/bin/hydra-server hydra-server -f -h '${cfg.listenHost}' "
              + "-p ${toString cfg.port} --max_spare_servers 5 --max_servers 25 "
              + "--max_requests 100 ${optionalString cfg.debugServer "-d"}";
            User = "hydra-www";
            PermissionsStartOnly = true;
            Restart = "always";
          };
      };

    systemd.services.hydra-queue-runner =
      { wantedBy = [ "multi-user.target" ];
        requires = [ "hydra-init.service" ];
        wants = [ "network-online.target" ];
        after = [ "hydra-init.service" "network.target" "network-online.target" ];
        path = [ cfg.package pkgs.hostname-debian pkgs.openssh pkgs.bzip2 config.nix.package ];
        restartTriggers = [ hydraConf ];
        environment = env // {
          PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
          IN_SYSTEMD = "1"; # to get log severity levels
          HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-queue-runner";
        };
        serviceConfig =
          { ExecStart = "@${cfg.package}/bin/hydra-queue-runner hydra-queue-runner -v";
            ExecStopPost = "${cfg.package}/bin/hydra-queue-runner --unlock";
            User = "hydra-queue-runner";
            Restart = "always";

            # Ensure we can get core dumps.
            LimitCORE = "infinity";
            WorkingDirectory = "${baseDir}/queue-runner";
          };
      };

    systemd.services.hydra-evaluator =
      { wantedBy = [ "multi-user.target" ];
        requires = [ "hydra-init.service" ];
        restartTriggers = [ hydraConf ];
        after = [ "hydra-init.service" "network.target" ];
        path = with pkgs; [ hostname-debian cfg.package ];
        environment = env // {
          HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-evaluator";
        };
        serviceConfig =
          { ExecStart = "@${cfg.package}/bin/hydra-evaluator hydra-evaluator";
            ExecStopPost = "${cfg.package}/bin/hydra-evaluator --unlock";
            User = "hydra";
            Restart = "always";
            WorkingDirectory = baseDir;
          };
      };

    systemd.services.hydra-update-gc-roots =
      { requires = [ "hydra-init.service" ];
        after = [ "hydra-init.service" ];
        environment = env // {
          HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-update-gc-roots";
        };
        serviceConfig =
          { ExecStart = "@${cfg.package}/bin/hydra-update-gc-roots hydra-update-gc-roots";
            User = "hydra";
          };
        startAt = "2,14:15";
      };

    systemd.services.hydra-send-stats =
      { wantedBy = [ "multi-user.target" ];
        after = [ "hydra-init.service" ];
        environment = env // {
          HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-send-stats";
        };
        serviceConfig =
          { ExecStart = "@${cfg.package}/bin/hydra-send-stats hydra-send-stats";
            User = "hydra";
          };
      };

    systemd.services.hydra-notify =
      { wantedBy = [ "multi-user.target" ];
        requires = [ "hydra-init.service" ];
        after = [ "hydra-init.service" ];
        restartTriggers = [ hydraConf ];
        path = [ pkgs.zstd ];
        environment = env // {
          PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
          HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify";
        };
        serviceConfig =
          { ExecStart = "@${cfg.package}/bin/hydra-notify hydra-notify";
            # FIXME: run this under a less privileged user?
            User = "hydra-queue-runner";
            Restart = "always";
            RestartSec = 5;
          };
      };

    # If there is less than a certain amount of free disk space, stop
    # the queue/evaluator to prevent builds from failing or aborting.
    # Leaves a tag file indicating this reason; if the tag file exists
    # and disk space is above the threshold + 10GB, the queue/evaluator will be
    # restarted; starting it if it is already started is not harmful.
    systemd.services.hydra-check-space =
      { script =
          ''
            spaceleft=$(($(stat -f -c '%a' /nix/store) * $(stat -f -c '%S' /nix/store)))
            spacestopstart() {
              service=$1
              minFreeGB=$2
              if [ $spaceleft -lt $(($minFreeGB * 1024**3)) ]; then
                if [ $(systemctl is-active $service) == active ]; then
                  echo "stopping $service due to lack of free space..."
                  systemctl stop $service
                  date > ${baseDir}/.$service-stopped-minspace
                fi
              else
                if [ $spaceleft -gt $(( ($minFreeGB + 10) * 1024**3)) -a \
                     -r ${baseDir}/.$service-stopped-minspace ] ; then
                  rm ${baseDir}/.$service-stopped-minspace
                  echo "restarting $service due to newly available free space..."
                  systemctl start $service
                fi
              fi
            }
            spacestopstart hydra-queue-runner ${toString cfg.minimumDiskFree}
            spacestopstart hydra-evaluator ${toString cfg.minimumDiskFreeEvaluator}
          '';
        startAt = "*:0/5";
      };

    # Periodically compress build logs. The queue runner compresses
    # logs automatically after a step finishes, but this doesn't work
    # if the queue runner is stopped prematurely.
    systemd.services.hydra-compress-logs =
      { path = [ pkgs.bzip2 pkgs.zstd ];
        script =
          ''
            set -eou pipefail
            compression=$(sed -nr 's/compress_build_logs_compression = (.*)/\1/p' ${baseDir}/hydra.conf)
            if [[ $compression == "" || $compression == bzip2 ]]; then
              compressionCmd=(bzip2)
            elif [[ $compression == zstd ]]; then
              compressionCmd=(zstd --rm)
            fi
            find ${baseDir}/build-logs -ignore_readdir_race -type f -name "*.drv" -mtime +3 -size +0c -print0 | xargs -0 -r "''${compressionCmd[@]}" --force --quiet
          '';
        startAt = "Sun 01:45";
      };

    services.postgresql.enable = mkIf haveLocalDB true;

    services.postgresql.identMap = optionalString haveLocalDB
      ''
        hydra-users hydra hydra
        hydra-users hydra-queue-runner hydra
        hydra-users hydra-www hydra
        hydra-users root hydra
        # The postgres user is used to create the pg_trgm extension for the hydra database
        hydra-users postgres postgres
      '';

    services.postgresql.authentication = optionalString haveLocalDB
      ''
        local hydra all ident map=hydra-users
      '';

  };

}
@@ -1,4 +0,0 @@
install_data('hydra.nix',
  install_dir: get_option('datadir') / 'nix',
  rename: ['hydra-module.nix'],
)
306
nixos-tests.nix
306
nixos-tests.nix
@@ -1,306 +0,0 @@
{ forEachSystem, nixpkgs, nixosModules }:

let
  # NixOS configuration used for VM tests.
  hydraServer =
    { pkgs, ... }:
    {
      imports = [
        nixosModules.hydra
        nixosModules.hydraTest
      ];

      virtualisation.memorySize = 1024;
      virtualisation.writableStore = true;

      environment.systemPackages = [ pkgs.perlPackages.LWP pkgs.perlPackages.JSON ];

      nix = {
        # Without this nix tries to fetch packages from the default
        # cache.nixos.org which is not reachable from this sandboxed NixOS test.
        settings.substituters = [ ];
      };
    };

in

{

  install = forEachSystem (system:
    (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
      name = "hydra-install";
      nodes.machine = hydraServer;
      testScript =
        ''
          machine.wait_for_job("hydra-init")
          machine.wait_for_job("hydra-server")
          machine.wait_for_job("hydra-evaluator")
          machine.wait_for_job("hydra-queue-runner")
          machine.wait_for_open_port(3000)
          machine.succeed("curl --fail http://localhost:3000/")
        '';
    });

  notifications = forEachSystem (system:
    (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).simpleTest {
      name = "hydra-notifications";
      nodes.machine = {
        imports = [ hydraServer ];
        services.hydra-dev.extraConfig = ''
          <influxdb>
            url = http://127.0.0.1:8086
            db = hydra
          </influxdb>
        '';
        services.influxdb.enable = true;
      };
      testScript = { nodes, ... }: ''
        machine.wait_for_job("hydra-init")

        # Create an admin account and some other state.
        machine.succeed(
            """
            su - hydra -c "hydra-create-user root --email-address 'alice@example.org' --password foobar --role admin"
            mkdir /run/jobset
            chmod 755 /run/jobset
            cp ${./t/jobs/api-test.nix} /run/jobset/default.nix
            chmod 644 /run/jobset/default.nix
            chown -R hydra /run/jobset
            """
        )

        # Wait until InfluxDB can receive web requests
        machine.wait_for_job("influxdb")
        machine.wait_for_open_port(8086)

        # Create an InfluxDB database where hydra will write to
        machine.succeed(
            "curl -XPOST 'http://127.0.0.1:8086/query' "
            + "--data-urlencode 'q=CREATE DATABASE hydra'"
        )

        # Wait until hydra-server can receive HTTP requests
        machine.wait_for_job("hydra-server")
        machine.wait_for_open_port(3000)

        # Setup the project and jobset
        machine.succeed(
            "su - hydra -c 'perl -I ${nodes.machine.services.hydra-dev.package.perlDeps}/lib/perl5/site_perl ${./t/setup-notifications-jobset.pl}' >&2"
        )

        # Wait until hydra has built the job and
        # the InfluxDBNotification plugin uploaded its notification to InfluxDB
        machine.wait_until_succeeds(
            "curl -s -H 'Accept: application/csv' "
            + "-G 'http://127.0.0.1:8086/query?db=hydra' "
            + "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
        )
      '';
    });

  gitea = forEachSystem (system:
    let
      pkgs = nixpkgs.legacyPackages.${system};
    in
    (import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; }).makeTest {
      name = "hydra-gitea";
      nodes.machine = { pkgs, ... }: {
        imports = [ hydraServer ];
        services.hydra-dev.extraConfig = ''
          <gitea_authorization>
            root=d7f16a3412e01a43a414535b16007c6931d3a9c7
          </gitea_authorization>
        '';
        nixpkgs.config.permittedInsecurePackages = [ "gitea-1.19.4" ];
        nix = {
          settings.substituters = [ ];
        };
        services.gitea = {
          enable = true;
          database.type = "postgres";
          settings = {
            service.DISABLE_REGISTRATION = true;
            server.HTTP_PORT = 3001;
          };
        };
        services.openssh.enable = true;
        environment.systemPackages = with pkgs; [ gitea git jq gawk ];
        networking.firewall.allowedTCPPorts = [ 3000 ];
      };
      skipLint = true;
      testScript =
        let
          scripts.mktoken = pkgs.writeText "token.sql" ''
            INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight, scope) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7', 'all');
          '';

          scripts.git-setup = pkgs.writeShellScript "setup.sh" ''
            set -x
            mkdir -p /tmp/repo $HOME/.ssh
            cat ${snakeoilKeypair.privkey} > $HOME/.ssh/privk
            chmod 0400 $HOME/.ssh/privk
            git -C /tmp/repo init
            cp ${smallDrv} /tmp/repo/jobset.nix
            git -C /tmp/repo add .
            git config --global user.email test@localhost
            git config --global user.name test
            git -C /tmp/repo commit -m 'Initial import'
            git -C /tmp/repo remote add origin gitea@machine:root/repo
            GIT_SSH_COMMAND='ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no' \
              git -C /tmp/repo push origin master
            git -C /tmp/repo log >&2
          '';

          scripts.hydra-setup = pkgs.writeShellScript "hydra.sh" ''
            set -x
            su -l hydra -c "hydra-create-user root --email-address \
              'alice@example.org' --password foobar --role admin"

            URL=http://localhost:3000
            USERNAME="root"
            PASSWORD="foobar"
            PROJECT_NAME="trivial"
            JOBSET_NAME="trivial"
            mycurl() {
              curl --referer $URL -H "Accept: application/json" \
                -H "Content-Type: application/json" $@
            }

            cat >data.json <<EOF
            { "username": "$USERNAME", "password": "$PASSWORD" }
            EOF
            mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt

            cat >data.json <<EOF
            {
              "displayname":"Trivial",
              "enabled":"1",
              "visible":"1"
            }
            EOF
            mycurl --silent -X PUT $URL/project/$PROJECT_NAME \
              -d @data.json -b hydra-cookie.txt

            cat >data.json <<EOF
            {
              "description": "Trivial",
              "checkinterval": "60",
              "enabled": "1",
              "visible": "1",
              "keepnr": "1",
              "enableemail": true,
              "emailoverride": "hydra@localhost",
              "type": 0,
              "nixexprinput": "git",
              "nixexprpath": "jobset.nix",
              "inputs": {
                "git": {"value": "http://localhost:3001/root/repo.git", "type": "git"},
                "gitea_repo_name": {"value": "repo", "type": "string"},
                "gitea_repo_owner": {"value": "root", "type": "string"},
                "gitea_status_repo": {"value": "git", "type": "string"},
                "gitea_http_url": {"value": "http://localhost:3001", "type": "string"}
              }
            }
            EOF

            mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME \
              -d @data.json -b hydra-cookie.txt
          '';

          api_token = "d7f16a3412e01a43a414535b16007c6931d3a9c7";

          snakeoilKeypair = {
            privkey = pkgs.writeText "privkey.snakeoil" ''
              -----BEGIN EC PRIVATE KEY-----
              MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
              AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
              r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
              -----END EC PRIVATE KEY-----
            '';

            pubkey = pkgs.lib.concatStrings [
              "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
              "yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
              "9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= sakeoil"
            ];
          };

          smallDrv = pkgs.writeText "jobset.nix" ''
            { trivial = builtins.derivation {
                name = "trivial";
                system = "${system}";
                builder = "/bin/sh";
                allowSubstitutes = false;
                preferLocalBuild = true;
                args = ["-c" "echo success > $out; exit 0"];
              };
            }
          '';
        in
        ''
          import json

          machine.start()
          machine.wait_for_unit("multi-user.target")
          machine.wait_for_open_port(3000)
          machine.wait_for_open_port(3001)

          machine.succeed(
              "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
              + "--username root --password root --email test@localhost'"
          )
          machine.succeed("su -l postgres -c 'psql gitea < ${scripts.mktoken}'")

          machine.succeed(
              "curl --fail -X POST http://localhost:3001/api/v1/user/repos "
              + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
              + f"-H 'Authorization: token ${api_token}'"
              + ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
          )

          machine.succeed(
              "curl --fail -X POST http://localhost:3001/api/v1/user/keys "
              + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
              + f"-H 'Authorization: token ${api_token}'"
              + ' -d \'{"key":"${snakeoilKeypair.pubkey}","read_only":true,"title":"SSH"}\'''
          )

          machine.succeed(
              "${scripts.git-setup}"
          )

          machine.succeed(
              "${scripts.hydra-setup}"
          )

          machine.wait_until_succeeds(
              'curl -Lf -s http://localhost:3000/build/1 -H "Accept: application/json" '
              + '| jq .buildstatus | xargs test 0 -eq'
          )

          data = machine.succeed(
              'curl -Lf -s "http://localhost:3001/api/v1/repos/root/repo/statuses/$(cd /tmp/repo && git show | head -n1 | awk "{print \\$2}")" '
              + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
              + f"-H 'Authorization: token ${api_token}'"
          )

          response = json.loads(data)

          assert len(response) == 2, "Expected exactly two status updates for latest commit (queued, finished)!"
          assert response[0]['status'] == "success", "Expected finished status to be success!"
          assert response[1]['status'] == "pending", "Expected queued status to be pending!"

          machine.shutdown()
        '';
    });

  validate-openapi = forEachSystem (system:
    let pkgs = nixpkgs.legacyPackages.${system}; in
    pkgs.runCommand "validate-openapi"
      { buildInputs = [ pkgs.openapi-generator-cli ]; }
      ''
        openapi-generator-cli validate -i ${./hydra-api.yaml}
        touch $out
      '');

}
285
package.nix
285
package.nix
@@ -1,285 +0,0 @@
{ stdenv
, lib
, fileset

, rawSrc

, buildEnv

, perlPackages

, nixComponents
, git

, makeWrapper
, meson
, ninja
, nukeReferences
, pkg-config
, mdbook

, unzip
, libpqxx
, top-git
, mercurial
, darcs
, subversion
, breezy
, openssl
, bzip2
, libxslt
, perl
, pixz
, boost
, postgresql_13
, nlohmann_json
, prometheus-cpp

, cacert
, foreman
, glibcLocales
, libressl
, openldap
, python3

, openssh
, coreutils
, findutils
, gzip
, xz
, gnutar
, gnused
, nix-eval-jobs

, rpm
, dpkg
, cdrkit
}:

let
  perlDeps = buildEnv {
    name = "hydra-perl-deps";
    paths = lib.closePropagation
      ([
        nixComponents.nix-perl-bindings
        git
      ] ++ (with perlPackages; [
        AuthenSASL
        CatalystActionREST
        CatalystAuthenticationStoreDBIxClass
        CatalystAuthenticationStoreLDAP
        CatalystDevel
        CatalystPluginAccessLog
        CatalystPluginAuthorizationRoles
        CatalystPluginCaptcha
        CatalystPluginPrometheusTiny
        CatalystPluginSessionStateCookie
        CatalystPluginSessionStoreFastMmap
        CatalystPluginStackTrace
        CatalystTraitForRequestProxyBase
        CatalystViewDownload
        CatalystViewJSON
        CatalystViewTT
        CatalystXRoleApplicator
        CatalystXScriptServerStarman
        CryptPassphrase
        CryptPassphraseArgon2
        CryptRandPasswd
        DataDump
        DateTime
        DBDPg
        DBDSQLite
        DBIxClassHelpers
        DigestSHA1
        EmailMIME
        EmailSender
        FileCopyRecursive
        FileLibMagic
        FileSlurper
        FileWhich
        IOCompress
        IPCRun
        IPCRun3
        JSON
        JSONMaybeXS
        JSONXS
        ListSomeUtils
        LWP
        LWPProtocolHttps
        ModulePluggable
        NetAmazonS3
        NetPrometheus
        NetStatsd
        NumberBytesHuman
        PadWalker
        ParallelForkManager
        PerlCriticCommunity
        PrometheusTinyShared
        ReadonlyX
        SetScalar
        SQLSplitStatement
        Starman
        StringCompareConstantTime
        SysHostnameLong
        TermSizeAny
        TermReadKey
        Test2Harness
        TestPostgreSQL
        TextDiff
        TextTable
        UUID4Tiny
        YAML
        XMLSimple
      ]));
  };

  version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (rawSrc.lastModifiedDate or "19700101")}.${rawSrc.shortRev or "DIRTY"}";
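  # For a checked-out tree this yields something like
  # "2.4.20240101.abcdef12" (hypothetical version.txt contents and revision).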
in
stdenv.mkDerivation (finalAttrs: {
  pname = "hydra";
  inherit version;

  src = fileset.toSource {
    root = ./.;
    fileset = fileset.unions ([
      ./doc
      ./meson.build
      ./nixos-modules
      ./src
      ./t
      ./version.txt
      ./.perlcriticrc
    ]);
  };

  outputs = [ "out" "doc" ];

  strictDeps = true;

  nativeBuildInputs = [
    makeWrapper
    meson
    ninja
    nukeReferences
    pkg-config
    mdbook
    nixComponents.nix-cli
    perlDeps
    perl
    unzip
  ];

  buildInputs = [
    libpqxx
    openssl
    libxslt
    nixComponents.nix-util
    nixComponents.nix-store
    nixComponents.nix-main
    perlDeps
    perl
    boost
    nlohmann_json
    prometheus-cpp
  ];

  nativeCheckInputs = [
    bzip2
    darcs
    foreman
    top-git
    mercurial
    subversion
    breezy
    openldap
    postgresql_13
    pixz
    nix-eval-jobs
  ];

  checkInputs = [
    cacert
    glibcLocales
    libressl.nc
    python3
    nixComponents.nix-cli
  ];

  hydraPath = lib.makeBinPath (
    [
      subversion
      openssh
      nixComponents.nix-cli
      coreutils
      findutils
      pixz
      gzip
      bzip2
      xz
      gnutar
      unzip
      git
      top-git
      mercurial
      darcs
      gnused
      breezy
      nix-eval-jobs
    ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
  );

  OPENLDAP_ROOT = openldap;

  mesonBuildType = "release";

  postPatch = ''
    patchShebangs .
  '';

  shellHook = ''
    pushd $(git rev-parse --show-toplevel) >/dev/null

    PATH=$(pwd)/build/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/build/src/hydra-queue-runner:$PATH
    PERL5LIB=$(pwd)/src/lib:$PERL5LIB
    export HYDRA_HOME="$(pwd)/src/"
    mkdir -p .hydra-data
    export HYDRA_DATA="$(pwd)/.hydra-data"
    export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'

    popd >/dev/null
  '';

  doCheck = true;

  mesonCheckFlags = [ "--verbose" ];

  preCheck = ''
    export LOGNAME=''${LOGNAME:-foo}
    # set $HOME for bzr so it can create its trace file
    export HOME=$(mktemp -d)
  '';

  postInstall = ''
    mkdir -p $out/nix-support

    for i in $out/bin/*; do
        read -n 4 chars < $i
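        # $chars now holds the file's first four bytes; ELF binaries
        # (magic "\x7fELF") are skipped so that only scripts get wrapped.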
        if [[ $chars =~ ELF ]]; then continue; fi
        wrapProgram $i \
            --prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
            --prefix PATH ':' $out/bin:$hydraPath \
            --set HYDRA_RELEASE ${version} \
            --set HYDRA_HOME $out/libexec/hydra \
            --set NIX_RELEASE ${nixComponents.nix-cli.name or "unknown"} \
            --set NIX_EVAL_JOBS_RELEASE ${nix-eval-jobs.name or "unknown"}
    done
  '';

  dontStrip = true;

  meta.description = "Build of Hydra on ${stdenv.system}";
  passthru = {
    inherit perlDeps;
    nix = nixComponents.nix-cli;
  };
})
133 release.nix Normal file
@@ -0,0 +1,133 @@
{ nixpkgs ? /etc/nixos/nixpkgs
, hydraSrc ? { outPath = ./.; revCount = 1234; gitTag = "abcdef"; }
, officialRelease ? false
}:


rec {
  tarball =
    with import nixpkgs { };

    let nix = nixUnstable; in

    releaseTools.makeSourceTarball {
      name = "hydra-tarball";
      src = hydraSrc;
      inherit officialRelease;
      version = builtins.readFile ./version;

      buildInputs =
        [ perl libxslt dblatex tetex nukeReferences pkgconfig boehmgc git ];

      versionSuffix = if officialRelease then "" else "pre${toString hydraSrc.revCount}-${hydraSrc.gitTag}";
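      # With the default hydraSrc above this yields "pre1234-abcdef".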

      preConfigure = ''
        # TeX needs a writable font cache.
        export VARTEXFONTS=$TMPDIR/texfonts
      '';

      configureFlags =
        [ "--with-nix=${nix}"
          "--with-docbook-xsl=${docbook_xsl}/xml/xsl/docbook"
        ];

      postDist = ''
        make -C doc/manual install prefix="$out"
        nuke-refs "$out/share/doc/hydra/manual.pdf"

        echo "doc manual $out/share/doc/hydra manual.html" >> \
          "$out/nix-support/hydra-build-products"
        echo "doc-pdf manual $out/share/doc/hydra/manual.pdf" >> \
          "$out/nix-support/hydra-build-products"
      '';
    };

  build =
    { system ? "x86_64-linux" }:

    let pkgs = import nixpkgs {inherit system;}; in

    with pkgs;

    let nix = nixUnstable; in

    releaseTools.nixBuild {
      name = "hydra";
      src = tarball;
      configureFlags = "--with-nix=${nix}";

      buildInputs =
        [ perl makeWrapper libtool nix unzip nukeReferences pkgconfig boehmgc sqlite git gitAndTools.topGit mercurial subversion bazaar ]
        ++ (import ./deps.nix) { inherit pkgs; };

      hydraPath = stdenv.lib.concatStringsSep ":" (map (p: "${p}/bin") ( [
        libxslt sqlite subversion openssh nix coreutils findutils
        gzip bzip2 lzma gnutar unzip git gitAndTools.topGit mercurial gnused graphviz bazaar
      ] ++ ( if stdenv.isLinux then [rpm dpkg cdrkit] else [] )));

      preConfigure = "patchShebangs .";

      postInstall = ''
        ensureDir $out/nix-support
        nuke-refs $out/share/doc/hydra/manual/manual.pdf

        for i in $out/bin/*; do
          wrapProgram $i \
            --prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
            --prefix PATH ':' $out/bin:$hydraPath \
            --set HYDRA_RELEASE ${tarball.version} \
            --set HYDRA_HOME $out/libexec/hydra \
            --set NIX_RELEASE ${nix.name}
        done
      ''; # */

      LOGNAME="$USER";

      meta = {
        description = "Build of Hydra on ${system}";
      };

      succeedOnFailure = true;
      keepBuildDirectory = true;
    };


  tests =
    { nixos ? ../nixos, system ? "x86_64-linux" }:

    let hydra = build { inherit system; }; in

    with import "${nixos}/lib/testing.nix" { inherit nixpkgs system; };

    {

      install = simpleTest {

        machine =
          { config, pkgs, ... }:
          { services.postgresql.enable = true;
            environment.systemPackages = [ hydra ];
          };

        testScript =
          ''
            $machine->waitForJob("postgresql");

            # Initialise the database and the state.
            $machine->mustSucceed
              ( "createdb -O root hydra"
              , "psql hydra -f ${hydra}/share/hydra/sql/hydra-postgresql.sql"
              , "mkdir /var/lib/hydra"
              );

            # Start the web interface.
            #$machine->mustSucceed("HYDRA_DATA=/var/lib/hydra HYDRA_DBI='dbi:Pg:dbname=hydra;user=hydra;' hydra-server >&2 &");
            #$machine->waitForOpenPort("3000");
          '';

      };

    };


}
@@ -1,6 +0,0 @@
# The `default.nix` in flake-compat reads `flake.nix` and `flake.lock` from `src` and
# returns an attribute set of the shape `{ defaultNix, shellNix }`

(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
  src = ./.;
}).shellNix
@@ -1,7 +1,5 @@
# IMPORTANT: if you delete this file your app will not work as
# expected. you have been warned
use strict;
use warnings;
use inc::Module::Install;

name 'Hydra';
4 src/Makefile.am Normal file
@@ -0,0 +1,4 @@
SUBDIRS = c sql script lib root xsl
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)

8 src/c/Makefile.am Normal file
@@ -0,0 +1,8 @@
bin_PROGRAMS = hydra-eval-jobs

hydra_eval_jobs_SOURCES = hydra-eval-jobs.cc
hydra_eval_jobs_LDADD = -lmain -lexpr -L$(nix)/lib/nix $(BDW_GC_LIBS)

AM_CXXFLAGS = \
  -I$(nix)/include/nix \
  $(BDW_GC_CFLAGS)
265 src/c/hydra-eval-jobs.cc Normal file
@@ -0,0 +1,265 @@
#include <map>
#include <iostream>

#include <gc/gc_allocator.h>

#include "shared.hh"
#include "store-api.hh"
#include "eval.hh"
#include "util.hh"
#include "xml-writer.hh"
#include "get-drvs.hh"
#include "common-opts.hh"

using namespace nix;


void printHelp()
{
    std::cout << "Syntax: eval-jobs <expr>\n";
}


static Path gcRootsDir;


typedef std::map<Symbol, std::pair<unsigned int, Value *> > ArgsUsed;
typedef std::list<Value *, traceable_allocator<Value *> > ValueList;
typedef std::map<Symbol, ValueList> AutoArgs;


static void findJobs(EvalState & state, XMLWriter & doc,
    const ArgsUsed & argsUsed, const AutoArgs & argsLeft,
    Value & v, const string & attrPath);


static void tryJobAlts(EvalState & state, XMLWriter & doc,
    const ArgsUsed & argsUsed, const AutoArgs & argsLeft,
    const string & attrPath, Value & fun,
    Formals::Formals_::iterator cur,
    Formals::Formals_::iterator last,
    const Bindings & actualArgs)
{
    if (cur == last) {
        Value v, * arg = state.allocValue();
        state.mkAttrs(*arg, 0);
        *arg->attrs = actualArgs;
        mkApp(v, fun, *arg);
        findJobs(state, doc, argsUsed, argsLeft, v, attrPath);
        return;
    }

    AutoArgs::const_iterator a = argsLeft.find(cur->name);

    if (a == argsLeft.end())
        throw TypeError(format("job `%1%' requires an argument named `%2%'")
            % attrPath % cur->name);

    Formals::Formals_::iterator next = cur; ++next;

    int n = 0;
    foreach (ValueList::const_iterator, i, a->second) {
        Bindings actualArgs2(actualArgs); // !!! inefficient
        ArgsUsed argsUsed2(argsUsed);
        AutoArgs argsLeft2(argsLeft);
        actualArgs2.push_back(Attr(cur->name, *i));
        actualArgs2.sort(); // !!! inefficient
        argsUsed2[cur->name] = std::pair<unsigned int, Value *>(n, *i);
        argsLeft2.erase(cur->name);
        tryJobAlts(state, doc, argsUsed2, argsLeft2, attrPath, fun, next, last, actualArgs2);
        ++n;
    }
}


static void showArgsUsed(XMLWriter & doc, const ArgsUsed & argsUsed)
{
    foreach (ArgsUsed::const_iterator, i, argsUsed) {
        XMLAttrs xmlAttrs2;
        xmlAttrs2["name"] = i->first;
        xmlAttrs2["value"] = (format("%1%") % *i->second.second).str();
        xmlAttrs2["altnr"] = int2String(i->second.first);
        doc.writeEmptyElement("arg", xmlAttrs2);
    }
}


static string queryMetaFieldString(MetaInfo & meta, const string & name)
{
    MetaValue value = meta[name];
    if (value.type != MetaValue::tpString) return "";
    return value.stringValue;
}


static int queryMetaFieldInt(MetaInfo & meta, const string & name, int def)
{
    MetaValue value = meta[name];
    if (value.type == MetaValue::tpInt) return value.intValue;
    if (value.type == MetaValue::tpString) {
        int n;
        if (string2Int(value.stringValue, n)) return n;
    }
    return def;
}


static void findJobsWrapped(EvalState & state, XMLWriter & doc,
    const ArgsUsed & argsUsed, const AutoArgs & argsLeft,
    Value & v, const string & attrPath)
{
    debug(format("at path `%1%'") % attrPath);

    state.forceValue(v);

    if (v.type == tAttrs) {

        DrvInfo drv;

        if (getDerivation(state, v, drv)) {
            XMLAttrs xmlAttrs;
            Path drvPath;

            xmlAttrs["jobName"] = attrPath;
            xmlAttrs["nixName"] = drv.name;
            xmlAttrs["system"] = drv.system;
            xmlAttrs["drvPath"] = drvPath = drv.queryDrvPath(state);
            xmlAttrs["outPath"] = drv.queryOutPath(state);
            MetaInfo meta = drv.queryMetaInfo(state);
            xmlAttrs["description"] = queryMetaFieldString(meta, "description");
            xmlAttrs["longDescription"] = queryMetaFieldString(meta, "longDescription");
            xmlAttrs["license"] = queryMetaFieldString(meta, "license");
            xmlAttrs["homepage"] = queryMetaFieldString(meta, "homepage");

            int prio = queryMetaFieldInt(meta, "schedulingPriority", 100);
            xmlAttrs["schedulingPriority"] = int2String(prio);

            int timeout = queryMetaFieldInt(meta, "timeout", 36000);
            xmlAttrs["timeout"] = int2String(timeout);

            int maxsilent = queryMetaFieldInt(meta, "maxSilent", 3600);
            xmlAttrs["maxSilent"] = int2String(maxsilent);

            string maintainers;
            MetaValue value = meta["maintainers"];
            if (value.type == MetaValue::tpString)
                maintainers = value.stringValue;
            else if (value.type == MetaValue::tpStrings) {
                foreach (Strings::const_iterator, i, value.stringValues) {
                    if (maintainers.size() != 0) maintainers += ", ";
                    maintainers += *i;
                }
            }
            xmlAttrs["maintainers"] = maintainers;

            /* Register the derivation as a GC root. !!! This
               registers roots for jobs that we may have already
               done. */
            if (gcRootsDir != "") {
                Path root = gcRootsDir + "/" + baseNameOf(drvPath);
                if (!pathExists(root)) addPermRoot(*store, drvPath, root, false);
            }

            XMLOpenElement _(doc, "job", xmlAttrs);
            showArgsUsed(doc, argsUsed);
        }

        else {
            foreach (Bindings::iterator, i, *v.attrs)
                findJobs(state, doc, argsUsed, argsLeft, *i->value,
                    (attrPath.empty() ? "" : attrPath + ".") + (string) i->name);
        }
    }

    else if (v.type == tLambda && v.lambda.fun->matchAttrs) {
        tryJobAlts(state, doc, argsUsed, argsLeft, attrPath, v,
            v.lambda.fun->formals->formals.begin(),
            v.lambda.fun->formals->formals.end(),
            Bindings());
    }

    else if (v.type == tNull) {
        // allow null values, meaning 'do nothing'
    }

    else
        throw TypeError(format("unsupported value: %1%") % v);
}


static void findJobs(EvalState & state, XMLWriter & doc,
    const ArgsUsed & argsUsed, const AutoArgs & argsLeft,
    Value & v, const string & attrPath)
{
    try {
        findJobsWrapped(state, doc, argsUsed, argsLeft, v, attrPath);
    } catch (EvalError & e) {
        XMLAttrs xmlAttrs;
        xmlAttrs["location"] = attrPath;
        xmlAttrs["msg"] = e.msg();
        XMLOpenElement _(doc, "error", xmlAttrs);
        showArgsUsed(doc, argsUsed);
    }
}


void run(Strings args)
{
    EvalState state;
    Path releaseExpr;
    AutoArgs autoArgs;

    /* Prevent undeclared dependencies in the evaluation via
       $HYDRA_PATH. */
    unsetenv("HYDRA_PATH");

    for (Strings::iterator i = args.begin(); i != args.end(); ) {
        string arg = *i++;
        if (arg == "--arg" || arg == "--argstr") {
            /* This is like --arg in nix-instantiate, except that it
               supports multiple versions for the same argument.
               That is, autoArgs is a mapping from variable names to
               *lists* of values. */
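            /* For example (hypothetical invocation):
                 hydra-eval-jobs release.nix --arg system '"x86_64-linux"' --arg system '"i686-linux"'
               would evaluate every job once per value of `system`. */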
            if (i == args.end()) throw UsageError("missing argument");
            string name = *i++;
            if (i == args.end()) throw UsageError("missing argument");
            string value = *i++;
            Value * v = state.allocValue();
            if (arg == "--arg")
                state.eval(state.parseExprFromString(value, absPath(".")), *v);
            else
                mkString(*v, value);
            autoArgs[state.symbols.create(name)].push_back(v);
        }
        else if (arg == "--gc-roots-dir") {
            if (i == args.end()) throw UsageError("missing argument");
            gcRootsDir = *i++;
        }
        else if (parseSearchPathArg(arg, i, args.end(), state))
            ;
        else if (arg[0] == '-')
            throw UsageError(format("unknown flag `%1%'") % arg);
        else
            releaseExpr = arg;
    }

    if (releaseExpr == "") throw UsageError("no expression specified");

    if (gcRootsDir == "") printMsg(lvlError, "warning: `--gc-roots-dir' not specified");

    store = openStore();

    Expr * e = state.parseExprFromFile(releaseExpr);
    Value v;
    state.mkThunk_(v, e);

    std::cout.setf(std::ios::unitbuf);
    XMLWriter doc(true, std::cout);
    XMLOpenElement root(doc, "jobs");
    findJobs(state, doc, ArgsUsed(), autoArgs, v, "");

    state.printStats();
}


string programId = "eval-jobs";
@@ -1,521 +0,0 @@
#include "db.hh"
#include "hydra-config.hh"
#include <nix/util/pool.hh>
#include <nix/main/shared.hh>
#include <nix/util/signals.hh>

#include <algorithm>
#include <thread>
#include <cstring>
#include <optional>

#include <sys/types.h>
#include <sys/wait.h>

using namespace nix;

typedef std::pair<std::string, std::string> JobsetName;

class JobsetId {
public:

    std::string project;
    std::string jobset;
    int id;


    JobsetId(const std::string & project, const std::string & jobset, int id)
        : project{ project }, jobset{ jobset }, id{ id }
    {
    }

    friend bool operator== (const JobsetId & lhs, const JobsetId & rhs);
    friend bool operator!= (const JobsetId & lhs, const JobsetId & rhs);
    friend bool operator< (const JobsetId & lhs, const JobsetId & rhs);


    friend bool operator== (const JobsetId & lhs, const JobsetName & rhs);
    friend bool operator!= (const JobsetId & lhs, const JobsetName & rhs);

    std::string display() const {
        return boost::str(boost::format("%1%:%2% (jobset#%3%)") % project % jobset % id);
    }
};
bool operator==(const JobsetId & lhs, const JobsetId & rhs)
{
    return lhs.id == rhs.id;
}

bool operator!=(const JobsetId & lhs, const JobsetId & rhs)
{
    return lhs.id != rhs.id;
}

bool operator<(const JobsetId & lhs, const JobsetId & rhs)
{
    return lhs.id < rhs.id;
}

bool operator==(const JobsetId & lhs, const JobsetName & rhs)
{
    return lhs.project == rhs.first && lhs.jobset == rhs.second;
}

bool operator!=(const JobsetId & lhs, const JobsetName & rhs)
{
    return ! (lhs == rhs);
}

enum class EvaluationStyle
{
    SCHEDULE = 1,
    ONESHOT = 2,
    ONE_AT_A_TIME = 3,
};

struct Evaluator
{
    std::unique_ptr<HydraConfig> config;

    nix::Pool<Connection> dbPool;

    struct Jobset
    {
        JobsetId name;
        std::optional<EvaluationStyle> evaluation_style;
        time_t lastCheckedTime, triggerTime;
        int checkInterval;
        Pid pid;
    };

    typedef std::map<JobsetId, Jobset> Jobsets;

    std::optional<JobsetName> evalOne;

    const size_t maxEvals;

    struct State
    {
        size_t runningEvals = 0;
        Jobsets jobsets;
    };

    Sync<State> state_;

    std::condition_variable childStarted;
    std::condition_variable maybeDoWork;

    const time_t notTriggered = std::numeric_limits<time_t>::max();

    Evaluator()
        : config(std::make_unique<HydraConfig>())
        , maxEvals(std::max((size_t) 1, (size_t) config->getIntOption("max_concurrent_evals", 4)))
    { }

    void readJobsets()
    {
        auto conn(dbPool.get());

        pqxx::work txn(*conn);

        auto res = txn.exec
            ("select j.id as id, project, j.name, lastCheckedTime, triggerTime, checkInterval, j.enabled as jobset_enabled "
             "from Jobsets j "
             "join Projects p on j.project = p.name "
             "where j.enabled != 0 and p.enabled != 0");


        auto state(state_.lock());

        std::set<JobsetId> seen;

        for (auto const & row : res) {
            auto name = JobsetId{row["project"].as<std::string>(), row["name"].as<std::string>(), row["id"].as<int>()};

            if (evalOne && name != *evalOne) continue;

            auto res = state->jobsets.try_emplace(name, Jobset{name});

            auto & jobset = res.first->second;
            jobset.lastCheckedTime = row["lastCheckedTime"].as<time_t>(0);
            jobset.triggerTime = row["triggerTime"].as<time_t>(notTriggered);
            jobset.checkInterval = row["checkInterval"].as<time_t>();
            switch (row["jobset_enabled"].as<int>(0)) {
                case 1:
                    jobset.evaluation_style = EvaluationStyle::SCHEDULE;
                    break;
                case 2:
                    jobset.evaluation_style = EvaluationStyle::ONESHOT;
                    break;
                case 3:
                    jobset.evaluation_style = EvaluationStyle::ONE_AT_A_TIME;
                    break;
            }

            seen.insert(name);
        }

        if (evalOne && seen.empty()) {
            printError("the specified jobset does not exist or is disabled");
            std::_Exit(1);
        }

        for (auto i = state->jobsets.begin(); i != state->jobsets.end(); )
            if (seen.count(i->first))
                ++i;
            else {
                printInfo("forgetting jobset ‘%s’", i->first.display());
                i = state->jobsets.erase(i);
            }
    }

    void startEval(State & state, Jobset & jobset)
    {
        time_t now = time(0);

        printInfo("starting evaluation of jobset ‘%s’ (last checked %d s ago)",
            jobset.name.display(),
            now - jobset.lastCheckedTime);

        {
            auto conn(dbPool.get());
            pqxx::work txn(*conn);
            txn.exec("update Jobsets set startTime = $1 where id = $2",
                pqxx::params{now, jobset.name.id}).no_rows();
            txn.commit();
        }

        assert(jobset.pid == -1);

        jobset.pid = startProcess([&]() {
            Strings args = { "hydra-eval-jobset", jobset.name.project, jobset.name.jobset };
            execvp(args.front().c_str(), stringsToCharPtrs(args).data());
            throw SysError("executing ‘%1%’", args.front());
        });

        state.runningEvals++;

        childStarted.notify_one();
    }

    bool shouldEvaluate(Jobset & jobset)
    {
        if (jobset.pid != -1) {
            // Already running.
            debug("shouldEvaluate %s? no: already running",
                  jobset.name.display());
            return false;
        }

        if (jobset.triggerTime != std::numeric_limits<time_t>::max()) {
            // An evaluation of this Jobset is requested
            debug("shouldEvaluate %s? yes: requested",
                  jobset.name.display());
            return true;
        }

        if (jobset.checkInterval <= 0) {
            // Automatic scheduling is disabled. We allow requested
            // evaluations, but never schedule one ourselves.
debug("shouldEvaluate %s? no: checkInterval <= 0",
|
||||
jobset.name.display());
|
||||
return false;
|
||||
}
|
||||
|
||||
if (jobset.lastCheckedTime + jobset.checkInterval <= time(0)) {
|
||||
// Time to schedule a fresh evaluation. If the jobset
|
||||
// is a ONE_AT_A_TIME jobset, ensure the previous jobset
|
||||
// has no remaining, unfinished work.
|
||||
|
||||
auto conn(dbPool.get());
|
||||
|
||||
pqxx::work txn(*conn);
|
||||
|
||||
if (jobset.evaluation_style == EvaluationStyle::ONE_AT_A_TIME) {
|
||||
auto evaluation_res = txn.exec
|
||||
("select id from JobsetEvals "
|
||||
"where jobset_id = $1 "
|
||||
"order by id desc limit 1"
|
||||
,jobset.name.id
|
||||
);
|
||||
|
||||
if (evaluation_res.empty()) {
|
||||
// First evaluation, so allow scheduling.
|
||||
debug("shouldEvaluate(one-at-a-time) %s? yes: no prior eval",
|
||||
jobset.name.display());
|
||||
return true;
|
||||
}
|
||||
|
||||
auto evaluation_id = evaluation_res[0][0].as<int>();
|
||||
|
||||
auto unfinished_build_res = txn.exec
|
||||
("select id from Builds "
|
||||
"join JobsetEvalMembers "
|
||||
" on (JobsetEvalMembers.build = Builds.id) "
|
||||
"where JobsetEvalMembers.eval = $1 "
|
||||
" and builds.finished = 0 "
|
||||
" limit 1"
|
||||
,evaluation_id
|
||||
);
|
||||
|
||||
// If the previous evaluation has no unfinished builds
|
||||
// schedule!
|
||||
if (unfinished_build_res.empty()) {
|
||||
debug("shouldEvaluate(one-at-a-time) %s? yes: no unfinished builds",
|
||||
jobset.name.display());
|
||||
return true;
|
||||
} else {
|
||||
debug("shouldEvaluate(one-at-a-time) %s:%s? no: at least one unfinished build",
|
||||
                          jobset.name.display());
                    return false;
                }


            } else {
                // EvaluationStyle::ONESHOT, EvaluationStyle::SCHEDULED
                debug("shouldEvaluate(oneshot/scheduled) %s? yes: checkInterval elapsed",
                      jobset.name.display());
                return true;
            }
        }

        return false;
    }

    void startEvals(State & state)
    {
        std::vector<Jobsets::iterator> sorted;

        /* Filter out jobsets that have been evaluated recently and have
           not been triggered. */
        for (auto i = state.jobsets.begin(); i != state.jobsets.end(); ++i)
            if (evalOne ||
                (i->second.evaluation_style && shouldEvaluate(i->second)))
                sorted.push_back(i);

        /* Put jobsets in order of ascending trigger time, last checked
           time, and name. */
        std::sort(sorted.begin(), sorted.end(),
            [](const Jobsets::iterator & a, const Jobsets::iterator & b) {
                return
                    a->second.triggerTime != b->second.triggerTime
                    ? a->second.triggerTime < b->second.triggerTime
                    : a->second.lastCheckedTime != b->second.lastCheckedTime
                      ? a->second.lastCheckedTime < b->second.lastCheckedTime
                      : a->first < b->first;
            });

        /* Start jobset evaluations up to the concurrency limit. */
        for (auto & i : sorted) {
            if (state.runningEvals >= maxEvals) break;
            startEval(state, i->second);
        }
    }

    void loop()
    {
        auto state(state_.lock());

        while (true) {

            time_t now = time(0);

            std::chrono::seconds sleepTime = std::chrono::seconds::max();

            if (state->runningEvals < maxEvals) {
                for (auto & i : state->jobsets)
                    if (i.second.pid == -1 &&
                        i.second.checkInterval > 0)
                        sleepTime = std::min(sleepTime, std::chrono::seconds(
                            std::max((time_t) 1, i.second.lastCheckedTime - now + i.second.checkInterval)));
            }

            debug("waiting for %d s", sleepTime.count());
            if (sleepTime == std::chrono::seconds::max())
                state.wait(maybeDoWork);
            else
                state.wait_for(maybeDoWork, sleepTime);

            startEvals(*state);
        }
    }

    /* A thread that listens to PostgreSQL notifications about jobset
       changes, updates the jobsets map, and signals the main thread
       to start evaluations. */
    void databaseMonitor()
    {
        while (true) {

            try {

                auto conn(dbPool.get());

                receiver jobsetsAdded(*conn, "jobsets_added");
                receiver jobsetsDeleted(*conn, "jobsets_deleted");
                receiver jobsetsChanged(*conn, "jobset_scheduling_changed");

                while (true) {
                    /* Note: we read/notify before
                       await_notification() to ensure we don't miss a
                       state change. */
                    readJobsets();
                    maybeDoWork.notify_one();
                    conn->await_notification();
                    printInfo("received jobset event");
                }

            } catch (pqxx::broken_connection & e) {
                printError("Database connection broken: %s", e.what());
                std::_Exit(1);
            } catch (std::exception & e) {
                printError("exception in database monitor thread: %s", e.what());
                sleep(30);
            }
        }
    }

    /* A thread that reaps child processes. */
    void reaper()
    {
        while (true) {
            {
                auto state(state_.lock());
                while (!state->runningEvals)
                    state.wait(childStarted);
            }

            int status;
            pid_t pid = waitpid(-1, &status, 0);
            if (pid == -1) {
                if (errno == EINTR) continue;
                throw SysError("waiting for children");
            }

            {
                auto state(state_.lock());
                assert(state->runningEvals);
                state->runningEvals--;

                // FIXME: should use a map.
                for (auto & i : state->jobsets) {
                    auto & jobset(i.second);

                    if (jobset.pid == pid) {
                        printInfo("evaluation of jobset ‘%s’ %s",
                            jobset.name.display(), statusToString(status));

                        auto now = time(0);

                        jobset.triggerTime = notTriggered;
                        jobset.lastCheckedTime = now;

                        try {

                            auto conn(dbPool.get());
                            pqxx::work txn(*conn);

                            /* Clear the trigger time to prevent this
                               jobset from getting stuck in an endless
                               failing eval loop. */
                            txn.exec
                                ("update Jobsets set triggerTime = null where id = $1 and startTime is not null and triggerTime <= startTime",
                                 jobset.name.id).no_rows();

                            /* Clear the start time. */
                            txn.exec
                                ("update Jobsets set startTime = null where id = $1",
                                 jobset.name.id).no_rows();

                            if (!WIFEXITED(status) || WEXITSTATUS(status) > 1) {
                                txn.exec("update Jobsets set errorMsg = $1, lastCheckedTime = $2, errorTime = $2, fetchErrorMsg = null where id = $3",
                                    pqxx::params{fmt("evaluation %s", statusToString(status)), now, jobset.name.id}).no_rows();
                            }

                            txn.commit();

                        } catch (std::exception & e) {
                            printError("exception setting jobset error: %s", e.what());
                        }

                        jobset.pid.release();
                        maybeDoWork.notify_one();

                        if (evalOne) std::_Exit(0);

                        break;
                    }
                }
            }
        }
    }

    void unlock()
    {
        auto conn(dbPool.get());
        pqxx::work txn(*conn);
        txn.exec("update Jobsets set startTime = null").no_rows();
        txn.commit();
    }

    void run()
    {
        unlock();

        /* Can't be bothered to shut down cleanly. Goodbye! */
        auto callback = createInterruptCallback([&]() { std::_Exit(1); });

        std::thread reaperThread([&]() { reaper(); });

        std::thread monitorThread([&]() { databaseMonitor(); });

        while (true) {
            try {
                loop();
            } catch (pqxx::broken_connection & e) {
                printError("Database connection broken: %s", e.what());
                std::_Exit(1);
            } catch (std::exception & e) {
                printError("exception in main loop: %s", e.what());
                sleep(30);
            }
        }
    }
};

int main(int argc, char * * argv)
{
    return handleExceptions(argv[0], [&]() {
        initNix();

        signal(SIGINT, SIG_DFL);
        signal(SIGTERM, SIG_DFL);
        signal(SIGHUP, SIG_DFL);

        bool unlock = false;

        Evaluator evaluator;

        std::vector<std::string> args;

        parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
            if (*arg == "--unlock")
                unlock = true;
            else if (hasPrefix(*arg, "-"))
                return false;
            args.push_back(*arg);
            return true;
        });


        if (unlock)
            evaluator.unlock();
        else {
            if (!args.empty()) {
                if (args.size() != 2) throw UsageError("Syntax: hydra-evaluator [<project> <jobset>]");
                evaluator.evalOne = JobsetName(args[0], args[1]);
            }
            evaluator.run();
        }
    });
}
@@ -1,10 +0,0 @@
hydra_evaluator = executable('hydra-evaluator',
  'hydra-evaluator.cc',
  dependencies: [
    libhydra_dep,
    nix_util_dep,
    nix_main_dep,
    pqxx_dep,
  ],
  install: true,
)
@@ -1,646 +0,0 @@
#include <algorithm>
#include <cmath>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include <nix/store/build-result.hh>
#include <nix/store/path.hh>
#include <nix/store/legacy-ssh-store.hh>
#include <nix/store/serve-protocol.hh>
#include <nix/store/serve-protocol-impl.hh>
#include "state.hh"
#include <nix/util/current-process.hh>
#include <nix/util/processes.hh>
#include <nix/util/util.hh>
#include <nix/store/export-import.hh>
#include <nix/store/serve-protocol.hh>
#include <nix/store/serve-protocol-impl.hh>
#include <nix/store/ssh.hh>
#include <nix/util/finally.hh>
#include <nix/util/url.hh>

using namespace nix;

bool ::Machine::isLocalhost() const
{
    return storeUri.params.empty() && std::visit(overloaded {
        [](const StoreReference::Auto &) {
            return true;
        },
        [](const StoreReference::Specified & s) {
            return
                (s.scheme == "local" || s.scheme == "unix") ||
                ((s.scheme == "ssh" || s.scheme == "ssh-ng") &&
                 s.authority == "localhost");
        },
    }, storeUri.variant);
}

namespace nix::build_remote {

static std::unique_ptr<SSHMaster::Connection> openConnection(
    ::Machine::ptr machine, SSHMaster & master)
{
    Strings command = {"nix-store", "--serve", "--write"};
    if (machine->isLocalhost()) {
        command.push_back("--builders");
        command.push_back("");
    } else {
        auto remoteStore = machine->storeUri.params.find("remote-store");
        if (remoteStore != machine->storeUri.params.end()) {
            command.push_back("--store");
            command.push_back(escapeShellArgAlways(remoteStore->second));
        }
    }

    auto ret = master.startCommand(std::move(command), {
        "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
    });

    // XXX: determine the actual max value we can use from /proc.

    // FIXME: Should this be upstreamed into `startCommand` in Nix?

    int pipesize = 1024 * 1024;
    // F_SETPIPE_SZ takes the new capacity as an int, not a pointer.
    fcntl(ret->in.get(), F_SETPIPE_SZ, pipesize);
    fcntl(ret->out.get(), F_SETPIPE_SZ, pipesize);

    return ret;
}


static void copyClosureTo(
    ::Machine::Connection & conn,
    Store & destStore,
    const StorePathSet & paths,
    SubstituteFlag useSubstitutes = NoSubstitute)
{
    StorePathSet closure;
    destStore.computeFSClosure(paths, closure);

    /* Send the "query valid paths" command with the "lock" option
       enabled. This prevents a race where the remote host
       garbage-collects paths that are already there. Optionally, ask
       the remote host to substitute missing paths. */
    // FIXME: substitute output pollutes our build log
    /* Get back the set of paths that are already valid on the remote
       host. */
    auto present = conn.queryValidPaths(
        destStore, true, closure, useSubstitutes);

    if (present.size() == closure.size()) return;

    auto sorted = destStore.topoSortPaths(closure);

    StorePathSet missing;
    for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)
        if (!present.count(*i)) missing.insert(*i);

    printMsg(lvlDebug, "sending %d missing paths", missing.size());

    std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
        std::chrono::seconds(600));

    conn.to << ServeProto::Command::ImportPaths;
    exportPaths(destStore, missing, conn.to);
    conn.to.flush();

    if (readInt(conn.from) != 1)
        throw Error("remote machine failed to import closure");
}


// FIXME: use Store::topoSortPaths().
static StorePaths reverseTopoSortPaths(const std::map<StorePath, UnkeyedValidPathInfo> & paths)
{
    StorePaths sorted;
    StorePathSet visited;

    std::function<void(const StorePath & path)> dfsVisit;

    dfsVisit = [&](const StorePath & path) {
        if (!visited.insert(path).second) return;

        auto info = paths.find(path);
        auto references = info == paths.end() ? StorePathSet() : info->second.references;

        for (auto & i : references)
            /* Don't traverse into paths that don't exist. That can
               happen due to substitutes for non-existent paths. */
            if (i != path && paths.count(i))
                dfsVisit(i);

        sorted.push_back(path);
    };

    for (auto & i : paths)
        dfsVisit(i.first);

    return sorted;
}

static std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
{
    std::string base(drvPath.to_string());
    auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);

    createDirs(dirOf(logFile));

    AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
    if (!logFD) throw SysError("creating log file ‘%s’", logFile);

    return {std::move(logFile), std::move(logFD)};
}

static BasicDerivation sendInputs(
    State & state,
    Step & step,
    Store & localStore,
    Store & destStore,
    ::Machine::Connection & conn,
    unsigned int & overhead,
    counter & nrStepsWaiting,
    counter & nrStepsCopyingTo
)
{
    /* Replace the input derivations by their output paths to send a
       minimal closure to the builder.

       `tryResolve` currently does *not* rewrite input addresses, so it
       is safe to do this in all cases. (It should probably have a mode
       to do that, however, but we would not use it here.)
    */
    BasicDerivation basicDrv = ({
        auto maybeBasicDrv = step.drv->tryResolve(destStore, &localStore);
        if (!maybeBasicDrv)
            throw Error(
                "the derivation '%s' can’t be resolved. It’s probably "
                "missing some outputs",
                localStore.printStorePath(step.drvPath));
        *maybeBasicDrv;
    });

    /* Ensure that the inputs exist in the destination store. This is
       a no-op for regular stores, but for the binary cache store,
       this will copy the inputs to the binary cache from the local
       store. */
    if (&localStore != &destStore) {
        copyClosure(localStore, destStore,
            step.drv->inputSrcs,
            NoRepair, NoCheckSigs, NoSubstitute);
    }

    {
        auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
        mc1.reset();
        MaintainCount<counter> mc2(nrStepsCopyingTo);

        printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
            localStore.printStorePath(step.drvPath), conn.machine->storeUri.render());

        auto now1 = std::chrono::steady_clock::now();

        /* Copy the input closure. */
        if (conn.machine->isLocalhost()) {
            StorePathSet closure;
            destStore.computeFSClosure(basicDrv.inputSrcs, closure);
            copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
        } else {
            copyClosureTo(conn, destStore, basicDrv.inputSrcs, Substitute);
        }

        auto now2 = std::chrono::steady_clock::now();

        overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
    }

    return basicDrv;
}

static BuildResult performBuild(
    ::Machine::Connection & conn,
    Store & localStore,
    StorePath drvPath,
    const BasicDerivation & drv,
    const ServeProto::BuildOptions & options,
    counter & nrStepsBuilding
)
{
    conn.putBuildDerivationRequest(localStore, drvPath, drv, options);

    BuildResult result;

    time_t startTime, stopTime;

    startTime = time(0);
    {
        MaintainCount<counter> mc(nrStepsBuilding);
        result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
    }
    stopTime = time(0);

    if (!result.startTime) {
        // If the builder gave `startTime = 0`, use our measurements
        // instead of the builder's.
        //
        // Note: this represents the duration of a single round, rather
        // than all rounds.
        result.startTime = startTime;
        result.stopTime = stopTime;
    }

    // If the protocol was too old to give us `builtOutputs`, initialize
    // it manually by introspecting the derivation.
    if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
    {
        // If the remote is too old to handle CA derivations, we can’t get this
        // far anyways
        assert(drv.type().hasKnownOutputPaths());
        DerivationOutputsAndOptPaths drvOutputs = drv.outputsAndOptPaths(localStore);
        // Since this is a `BasicDerivation`, `staticOutputHashes` will not
        // do any real work.
        auto outputHashes = staticOutputHashes(localStore, drv);
        if (auto * successP = result.tryGetSuccess()) {
            for (auto & [outputName, output] : drvOutputs) {
                auto outputPath = output.second;
                // We’ve just asserted that the output paths of the derivation
                // were known
                assert(outputPath);
                auto outputHash = outputHashes.at(outputName);
                auto drvOutput = DrvOutput { outputHash, outputName };
                successP->builtOutputs.insert_or_assign(
                    std::move(outputName),
                    Realisation { drvOutput, *outputPath });
            }
        }
    }

    return result;
}

static void copyPathFromRemote(
    ::Machine::Connection & conn,
    NarMemberDatas & narMembers,
    Store & localStore,
    Store & destStore,
    const ValidPathInfo & info
)
{
    /* Receive the NAR from the remote and add it to the
       destination store. Meanwhile, extract all the info from the
       NAR that getBuildOutput() needs. */
    auto source2 = sinkToSource([&](Sink & sink)
    {
        /* Note: we should only send the command to dump the store
           path to the remote if the NAR is actually going to get read
           by the destination store, which won't happen if this path
           is already valid on the destination store. Since this
           lambda function only gets executed if someone tries to read
           from source2, we will send the command from here rather
           than outside the lambda. */
        conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
        conn.to.flush();

        TeeSource tee(conn.from, sink);
        extractNarData(tee, localStore.printStorePath(info.path), narMembers);
    });

    destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
}

static void copyPathsFromRemote(
    ::Machine::Connection & conn,
    NarMemberDatas & narMembers,
    Store & localStore,
    Store & destStore,
    const std::map<StorePath, UnkeyedValidPathInfo> & infos
)
{
    auto pathsSorted = reverseTopoSortPaths(infos);

    for (auto & path : pathsSorted) {
        auto & info = infos.find(path)->second;
        copyPathFromRemote(
            conn, narMembers, localStore, destStore,
            ValidPathInfo { path, info });
    }

}

}

/* using namespace nix::build_remote; */

void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
{
    startTime = buildResult.startTime;
    stopTime = buildResult.stopTime;
    timesBuilt = buildResult.timesBuilt;

    std::visit(overloaded{
        [&](const BuildResult::Success & success) {
            stepStatus = bsSuccess;
            switch (success.status) {
                case BuildResult::Success::Built:
                    break;
                case BuildResult::Success::Substituted:
                case BuildResult::Success::AlreadyValid:
                case BuildResult::Success::ResolvesToAlreadyValid:
                    isCached = true;
                    break;
                default:
                    assert(false);
            }
        },
        [&](const BuildResult::Failure & failure) {
            errorMsg = failure.errorMsg;
            isNonDeterministic = failure.isNonDeterministic;
            switch (failure.status) {
                case BuildResult::Failure::PermanentFailure:
                    stepStatus = bsFailed;
                    canCache = true;
                    errorMsg = "";
                    break;
                case BuildResult::Failure::InputRejected:
                case BuildResult::Failure::OutputRejected:
                    stepStatus = bsFailed;
                    canCache = true;
                    break;
                case BuildResult::Failure::TransientFailure:
                    stepStatus = bsFailed;
                    canRetry = true;
                    errorMsg = "";
                    break;
                case BuildResult::Failure::TimedOut:
                    stepStatus = bsTimedOut;
                    errorMsg = "";
                    break;
                case BuildResult::Failure::MiscFailure:
                    stepStatus = bsAborted;
                    canRetry = true;
                    break;
                case BuildResult::Failure::LogLimitExceeded:
                    stepStatus = bsLogLimitExceeded;
                    break;
                case BuildResult::Failure::NotDeterministic:
                    stepStatus = bsNotDeterministic;
                    canRetry = false;
                    canCache = true;
                    break;
                case BuildResult::Failure::CachedFailure:
                case BuildResult::Failure::DependencyFailed:
                case BuildResult::Failure::NoSubstituters:
                case BuildResult::Failure::HashMismatch:
                    stepStatus = bsAborted;
                    break;
                default:
                    assert(false);
            }
        },
    }, buildResult.inner);
}

/* Utility guard object to auto-release a semaphore on destruction. */
template <typename T>
class SemaphoreReleaser {
public:
    SemaphoreReleaser(T* s) : sem(s) {}
    ~SemaphoreReleaser() { sem->release(); }

private:
    T* sem;
};

void State::buildRemote(ref<Store> destStore,
    std::unique_ptr<MachineReservation> reservation,
    ::Machine::ptr machine, Step::ptr step,
    const ServeProto::BuildOptions & buildOptions,
    RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
    std::function<void(StepState)> updateStep,
    NarMemberDatas & narMembers)
{
    assert(BuildResult::Failure::TimedOut == 8);

    auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath);
    AutoDelete logFileDel(logFile, false);
    result.logFile = logFile;

    try {

        updateStep(ssConnecting);

        auto storeRef = machine->completeStoreReference();

        auto * pSpecified = std::get_if<StoreReference::Specified>(&storeRef.variant);
        if (!pSpecified || pSpecified->scheme != "ssh") {
            throw Error("Currently, only (legacy-)ssh stores are supported!");
        }

        LegacySSHStoreConfig storeConfig {
            pSpecified->scheme,
            pSpecified->authority,
            storeRef.params
        };

        auto master = storeConfig.createSSHMaster(
            false, // no SSH master yet
            logFD.get());

        // FIXME: rewrite to use Store.
        auto child = build_remote::openConnection(machine, master);

        {
            auto activeStepState(activeStep->state_.lock());
            if (activeStepState->cancelled) throw Error("step cancelled");
            activeStepState->pid = child->sshPid;
        }

        Finally clearPid([&]() {
            auto activeStepState(activeStep->state_.lock());
            activeStepState->pid = -1;

            /* FIXME: there is a slight race here with step
               cancellation in State::processQueueChange(), which
               could call kill() on this pid after we've done waitpid()
               on it. With pid wrap-around, there is a tiny
               possibility that we end up killing another
               process. Meh. */
        });

        ::Machine::Connection conn {
            {
                .to = child->in.get(),
                .from = child->out.get(),
                /* Handshake. */
                .remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
            },
            /*.machine =*/ machine,
        };

        Finally updateStats([&]() {
            bytesReceived += conn.from.read;
            bytesSent += conn.to.written;
        });

        constexpr ServeProto::Version our_version = 0x206;

        try {
            conn.remoteVersion = decltype(conn)::handshake(
                conn.to,
                conn.from,
                our_version,
                machine->storeUri.render());
        } catch (EndOfFile & e) {
            child->sshPid.wait();
            std::string s = chomp(readFile(result.logFile));
            throw Error("cannot connect to ‘%1%’: %2%", machine->storeUri.render(), s);
        }

        {
            auto info(machine->state->connectInfo.lock());
            info->consecutiveFailures = 0;
        }

        /* Gather the inputs. If the remote side is Nix <= 1.9, we have to
           copy the entire closure of ‘drvPath’, as well as the required
           outputs of the input derivations. On Nix > 1.9, we only need to
           copy the immediate sources of the derivation and the required
           outputs of the input derivations. */
        updateStep(ssSendingInputs);
        BasicDerivation resolvedDrv = build_remote::sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo);

        logFileDel.cancel();

        /* Truncate the log to get rid of messages about substitutions
           etc. on the remote system. */
        if (lseek(logFD.get(), 0, SEEK_SET) != 0)
throw SysError("seeking to the start of log file ‘%s’", result.logFile);
|
||||
|
||||
if (ftruncate(logFD.get(), 0) == -1)
|
||||
throw SysError("truncating log file ‘%s’", result.logFile);
|
||||
|
||||
logFD = -1;
|
||||
|
||||
/* Do the build. */
|
||||
printMsg(lvlDebug, "building ‘%s’ on ‘%s’",
|
||||
localStore->printStorePath(step->drvPath),
|
||||
machine->storeUri.render());
|
||||
|
||||
updateStep(ssBuilding);
|
||||
|
||||
auto buildResult = build_remote::performBuild(
|
||||
conn,
|
||||
*localStore,
|
||||
step->drvPath,
|
||||
resolvedDrv,
|
||||
buildOptions,
|
||||
nrStepsBuilding
|
||||
);
|
||||
|
||||
result.updateWithBuildResult(buildResult);
|
||||
|
||||
if (result.stepStatus != bsSuccess) return;
|
||||
|
||||
result.errorMsg = "";
|
||||
|
||||
/* If the path was substituted or already valid, then we didn't
|
||||
get a build log. */
|
||||
if (result.isCached) {
|
||||
printMsg(lvlInfo, "outputs of ‘%s’ substituted or already valid on ‘%s’",
|
||||
localStore->printStorePath(step->drvPath), machine->storeUri.render());
|
||||
unlink(result.logFile.c_str());
|
||||
result.logFile = "";
|
||||
}
|
||||
|
||||
/* Throttle CPU-bound work. Opportunistically skip updating the current
|
||||
* step, since this requires a DB roundtrip. */
|
||||
if (!localWorkThrottler.try_acquire()) {
|
||||
MaintainCount<counter> mc(nrStepsWaitingForDownloadSlot);
|
||||
updateStep(ssWaitingForLocalSlot);
|
||||
localWorkThrottler.acquire();
|
||||
}
|
||||
SemaphoreReleaser releaser(&localWorkThrottler);
|
||||
|
||||
/* Once we've started copying outputs, release the machine reservation
|
||||
* so further builds can happen. We do not release the machine earlier
|
||||
* to avoid situations where the queue runner is bottlenecked on
|
||||
* copying outputs and we end up building too many things that we
|
||||
* haven't been able to allow copy slots for. */
|
||||
reservation.reset();
|
||||
wakeDispatcher();
|
||||
|
||||
StorePathSet outputs;
|
||||
if (auto * successP = buildResult.tryGetSuccess())
|
||||
for (auto & [_, realisation] : successP->builtOutputs)
|
||||
outputs.insert(realisation.outPath);
|
||||
|
||||
/* Copy the output paths. */
|
||||
if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
|
||||
updateStep(ssReceivingOutputs);
|
||||
|
||||
MaintainCount<counter> mc(nrStepsCopyingFrom);
|
||||
|
||||
auto now1 = std::chrono::steady_clock::now();
|
||||
|
||||
auto infos = conn.queryPathInfos(*localStore, outputs);
|
||||
|
||||
size_t totalNarSize = 0;
|
||||
for (auto & [_, info] : infos) totalNarSize += info.narSize;
|
||||
|
||||
if (totalNarSize > maxOutputSize) {
|
||||
result.stepStatus = bsNarSizeLimitExceeded;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Copy each path. */
|
||||
printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)",
|
||||
localStore->printStorePath(step->drvPath), machine->storeUri.render(), totalNarSize);
|
||||
|
||||
build_remote::copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos);
|
||||
auto now2 = std::chrono::steady_clock::now();
|
||||
|
||||
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
||||
}
|
||||
|
||||
/* Register the outputs of the newly built drv */
|
||||
if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
|
||||
auto outputHashes = staticOutputHashes(*localStore, *step->drv);
|
||||
if (auto * successP = buildResult.tryGetSuccess()) {
|
||||
for (auto & [outputName, realisation] : successP->builtOutputs) {
|
||||
// Register the resolved drv output
|
||||
destStore->registerDrvOutput(realisation);
|
||||
|
||||
// Also register the unresolved one
|
||||
auto unresolvedRealisation = realisation;
|
||||
unresolvedRealisation.signatures.clear();
|
||||
unresolvedRealisation.id.drvHash = outputHashes.at(outputName);
|
||||
destStore->registerDrvOutput(unresolvedRealisation);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Shut down the connection. */
|
||||
child->in = -1;
|
||||
child->sshPid.wait();
|
||||
|
||||
} catch (Error & e) {
|
||||
/* Disable this machine until a certain period of time has
|
||||
passed. This period increases on every consecutive
|
||||
failure. However, don't count failures that occurred soon
|
||||
after the last one (to take into account steps started in
|
||||
parallel). */
|
||||
auto info(machine->state->connectInfo.lock());
|
||||
auto now = std::chrono::system_clock::now();
|
||||
if (info->consecutiveFailures == 0 || info->lastFailure < now - std::chrono::seconds(30)) {
|
||||
info->consecutiveFailures = std::min(info->consecutiveFailures + 1, (unsigned int) 4);
|
||||
info->lastFailure = now;
|
||||
int delta = retryInterval * std::pow(retryBackoff, info->consecutiveFailures - 1) + (rand() % 30);
|
||||
printMsg(lvlInfo, "will disable machine ‘%1%’ for %2%s", machine->storeUri.render(), delta);
|
||||
info->disabledUntil = now + std::chrono::seconds(delta);
|
||||
}
|
||||
throw;
|
||||
}
|
||||
}
|
||||
@@ -1,163 +0,0 @@
#include "hydra-build-result.hh"
#include <nix/store/store-api.hh>
#include <nix/util/util.hh>
#include <nix/util/source-accessor.hh>

#include <regex>

using namespace nix;


BuildOutput getBuildOutput(
    nix::ref<Store> store,
    NarMemberDatas & narMembers,
    const OutputPathMap derivationOutputs)
{
    BuildOutput res;

    /* Compute the closure size. */
    StorePathSet outputs;
    StorePathSet closure;
    for (auto & [outputName, outputPath] : derivationOutputs) {
        store->computeFSClosure(outputPath, closure);
        outputs.insert(outputPath);
        res.outputs.insert({outputName, outputPath});
    }
    for (auto & path : closure) {
        auto info = store->queryPathInfo(path);
        res.closureSize += info->narSize;
        if (outputs.count(path)) res.size += info->narSize;
    }

    /* Fetch missing data. Usually buildRemote() will have extracted
       this data from the incoming NARs. */
    for (auto & output : outputs) {
        auto outputS = store->printStorePath(output);
        if (!narMembers.count(outputS)) {
            printInfo("fetching NAR contents of '%s'...", outputS);
            auto source = sinkToSource([&](Sink & sink)
            {
                store->narFromPath(output, sink);
            });
            extractNarData(*source, outputS, narMembers);
        }
    }

    /* Get build products. */
    bool explicitProducts = false;

    std::regex regex(
        "([a-zA-Z0-9_-]+)" // type (e.g. "doc")
        "[[:space:]]+"
        "([a-zA-Z0-9_-]+)" // subtype (e.g. "readme")
        "[[:space:]]+"
        "(\"[^\"]+\"|[^[:space:]<>\"]+)" // path (may be quoted)
        "([[:space:]]+([^[:space:]<>]+))?" // entry point
        , std::regex::extended);
for (auto & output : outputs) {
|
||||
auto outputS = store->printStorePath(output);
|
||||
|
||||
if (narMembers.count(outputS + "/nix-support/failed"))
|
||||
res.failed = true;
|
||||
|
||||
auto productsFile = narMembers.find(outputS + "/nix-support/hydra-build-products");
|
||||
if (productsFile == narMembers.end() ||
|
||||
productsFile->second.type != SourceAccessor::Type::tRegular)
|
||||
continue;
|
||||
assert(productsFile->second.contents);
|
||||
|
||||
explicitProducts = true;
|
||||
|
||||
for (auto & line : tokenizeString<Strings>(productsFile->second.contents.value(), "\n")) {
|
||||
BuildProduct product;
|
||||
|
||||
std::smatch match;
|
||||
if (!std::regex_match(line, match, regex)) continue;
|
||||
|
||||
product.type = match[1];
|
||||
product.subtype = match[2];
|
||||
std::string s(match[3]);
|
||||
product.path = s[0] == '"' && s.back() == '"' ? std::string(s, 1, s.size() - 2) : s;
|
||||
product.defaultPath = match[5];
|
||||
|
||||
/* Ensure that the path exists and points into the Nix
|
||||
store. */
|
||||
// FIXME: should we disallow products referring to other
|
||||
// store paths, or that are outside the input closure?
|
||||
if (product.path == "" || product.path[0] != '/') continue;
|
||||
product.path = canonPath(product.path);
|
||||
if (!store->isInStore(product.path)) continue;
|
||||
|
||||
auto file = narMembers.find(product.path);
|
||||
if (file == narMembers.end()) continue;
|
||||
|
||||
product.name = product.path == store->printStorePath(output) ? "" : baseNameOf(product.path);
|
||||
if (!std::regex_match(product.name, std::regex("[a-zA-Z0-9.@:_ -]*")))
|
||||
product.name = "";
|
||||
|
||||
if (file->second.type == SourceAccessor::Type::tRegular) {
|
||||
product.isRegular = true;
|
||||
product.fileSize = file->second.fileSize.value();
|
||||
product.sha256hash = file->second.sha256.value();
|
||||
}
|
||||
|
||||
res.products.push_back(product);
|
||||
}
|
||||
}
|
||||
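    /* (Example, not part of the original source.) A line that the
       regex above accepts has the form

           <type> <subtype> <path> [<entry-point>]

       e.g. (hypothetical store paths):

           doc readme "/nix/store/<hash>-foo/share/doc/readme.html"
           file binary-dist /nix/store/<hash>-foo/tarballs/foo.tar.gz

       The path may be double-quoted if it contains spaces; the
       optional trailing field becomes product.defaultPath. */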

    /* If no build products were explicitly declared, then add all
       outputs as a product of type "nix-build". */
    if (!explicitProducts) {
        for (auto & [name, output] : derivationOutputs) {
            BuildProduct product;
            product.path = store->printStorePath(output);
            product.type = "nix-build";
            product.subtype = name == "out" ? "" : name;
            product.name = output.name();

            auto file = narMembers.find(product.path);
            assert(file != narMembers.end());
            if (file->second.type == SourceAccessor::Type::tDirectory)
                res.products.push_back(product);
        }
    }

    /* Get the release name from $output/nix-support/hydra-release-name. */
    for (auto & output : outputs) {
        auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-release-name");
        if (file == narMembers.end() ||
            file->second.type != SourceAccessor::Type::tRegular)
            continue;
        auto contents = trim(file->second.contents.value());
        if (std::regex_match(contents, std::regex("[a-zA-Z0-9.@:_-]+")))
            res.releaseName = contents;
    }

    /* Get metrics. */
    for (auto & output : outputs) {
        auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-metrics");
        if (file == narMembers.end() ||
            file->second.type != SourceAccessor::Type::tRegular)
            continue;
        for (auto & line : tokenizeString<Strings>(file->second.contents.value(), "\n")) {
            auto fields = tokenizeString<std::vector<std::string>>(line);
            if (fields.size() < 2) continue;
            if (!std::regex_match(fields[0], std::regex("[a-zA-Z0-9._-]+")))
                continue;
            BuildMetric metric;
            metric.name = fields[0];
            try {
                metric.value = std::stod(fields[1]);
            } catch (...) {
                continue; // skip this metric
            }
            metric.unit = fields.size() >= 3 ? fields[2] : "";
            if (!std::regex_match(metric.unit, std::regex("[a-zA-Z0-9._%-]+")))
                metric.unit = "";
            res.metrics[metric.name] = metric;
        }
    }

    return res;
}
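/* (Example, not part of the original source.) Each line of
   $out/nix-support/hydra-metrics is "<name> <value> [<unit>]", parsed
   by the loop above, e.g. (hypothetical metrics):

       buildTime 123.4 s
       lineCoverage 87.5 %

   Lines whose value does not parse as a double are skipped, and units
   that fail the character whitelist are dropped. */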
@@ -1,506 +0,0 @@
#include <cmath>

#include "state.hh"
#include "hydra-build-result.hh"
#include <nix/util/finally.hh>
#include <nix/store/binary-cache-store.hh>

using namespace nix;


void setThreadName(const std::string & name)
{
#ifdef __linux__
    pthread_setname_np(pthread_self(), std::string(name, 0, 15).c_str());
#endif
}


void State::builder(std::unique_ptr<MachineReservation> reservation)
{
    setThreadName("bld~" + std::string(reservation->step->drvPath.to_string()));

    StepResult res = sRetry;

    nrStepsStarted++;

    Step::wptr wstep = reservation->step;

    {
        auto activeStep = std::make_shared<ActiveStep>();
        activeStep->step = reservation->step;
        activeSteps_.lock()->insert(activeStep);

        Finally removeActiveStep([&]() {
            activeSteps_.lock()->erase(activeStep);
        });

        std::string machine = reservation->machine->storeUri.render();

        try {
            auto destStore = getDestStore();
            // Might release the reservation.
            res = doBuildStep(destStore, std::move(reservation), activeStep);
        } catch (std::exception & e) {
            printMsg(lvlError, "uncaught exception building ‘%s’ on ‘%s’: %s",
                localStore->printStorePath(activeStep->step->drvPath),
                machine,
                e.what());
        }
    }

    /* If there was a temporary failure, retry the step after an
       exponentially increasing interval. */
    Step::ptr step = wstep.lock();
    if (res != sDone && step) {

        if (res == sRetry) {
            auto step_(step->state.lock());
            step_->tries++;
            nrRetries++;
            if (step_->tries > maxNrRetries) maxNrRetries = step_->tries; // yeah yeah, not atomic
            int delta = retryInterval * std::pow(retryBackoff, step_->tries - 1) + (rand() % 10);
            printMsg(lvlInfo, "will retry ‘%s’ after %ss", localStore->printStorePath(step->drvPath), delta);
            step_->after = std::chrono::system_clock::now() + std::chrono::seconds(delta);
        }

        makeRunnable(step);
    }
}


State::StepResult State::doBuildStep(nix::ref<Store> destStore,
    std::unique_ptr<MachineReservation> reservation,
    std::shared_ptr<ActiveStep> activeStep)
{
    auto step(reservation->step);
    auto machine(reservation->machine);

    {
        auto step_(step->state.lock());
        assert(step_->created);
        assert(!step->finished);
    }

    /* There can be any number of builds in the database that depend
       on this derivation. Arbitrarily pick one (though preferring a
       build of which this is the top-level derivation) for the
       purpose of creating build steps. We could create a build step
       record for every build, but that could be very expensive
       (e.g. a stdenv derivation can be a dependency of tens of
       thousands of builds), so we don't.

       We don't keep a Build::ptr here to allow
       State::processQueueChange() to detect whether a step can be
       cancelled (namely if there are no more Builds referring to
       it). */
    BuildID buildId;
    std::optional<StorePath> buildDrvPath;
    // Other fields set below
    nix::ServeProto::BuildOptions buildOptions {
        .maxLogSize = maxLogSize,
        .nrRepeats = step->isDeterministic ? 1u : 0u,
        .enforceDeterminism = step->isDeterministic,
        .keepFailed = false,
    };

    auto conn(dbPool.get());

    {
        std::set<Build::ptr> dependents;
        std::set<Step::ptr> steps;
        getDependents(step, dependents, steps);

        if (dependents.empty()) {
            /* Apparently all builds that depend on this derivation
               are gone (e.g. cancelled). So don't bother. This is
               very unlikely to happen, because normally Steps are
               only kept alive by being reachable from a
               Build. However, it's possible that a new Build just
               created a reference to this step. So to handle that
               possibility, we retry this step (putting it back in
               the runnable queue). If there are really no strong
               pointers to the step, it will be deleted. */
            printMsg(lvlInfo, "maybe cancelling build step ‘%s’", localStore->printStorePath(step->drvPath));
            return sMaybeCancelled;
        }

        Build::ptr build;

        for (auto build2 : dependents) {
            if (build2->drvPath == step->drvPath) {
                build = build2;
                pqxx::work txn(*conn);
                notifyBuildStarted(txn, build->id);
                txn.commit();
            }
            {
                auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName));
                if (i != jobsetRepeats.end())
                    buildOptions.nrRepeats = std::max(buildOptions.nrRepeats, i->second);
            }
        }
        if (!build) build = *dependents.begin();

        buildId = build->id;
        buildDrvPath = build->drvPath;
        buildOptions.maxSilentTime = build->maxSilentTime;
        buildOptions.buildTimeout = build->buildTimeout;

        printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)",
            localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->storeUri.render(), buildId, (dependents.size() - 1));
    }

    if (!buildOneDone)
        buildOneDone = buildId == buildOne && step->drvPath == *buildDrvPath;

    RemoteResult result;
    BuildOutput res;
    unsigned int stepNr = 0;
    bool stepFinished = false;

    Finally clearStep([&]() {
        if (stepNr && !stepFinished) {
            printError("marking step %d of build %d as orphaned", stepNr, buildId);
            auto orphanedSteps_(orphanedSteps.lock());
            orphanedSteps_->emplace(buildId, stepNr);
        }

        if (stepNr) {
            /* Upload the log file to the binary cache. FIXME: should
               be done on a worker thread. */
            try {
                auto store = destStore.dynamic_pointer_cast<BinaryCacheStore>();
                if (uploadLogsToBinaryCache && store && pathExists(result.logFile)) {
                    store->upsertFile("log/" + std::string(step->drvPath.to_string()), readFile(result.logFile), "text/plain; charset=utf-8");
                    unlink(result.logFile.c_str());
                }
            } catch (...) {
                ignoreExceptionInDestructor();
            }
        }
    });

    time_t stepStartTime = result.startTime = time(0);

    /* If any of the outputs have previously failed, then don't bother
       building again. */
    if (checkCachedFailure(step, *conn))
        result.stepStatus = bsCachedFailure;
    else {

        /* Create a build step record indicating that we started
           building. */
        {
            auto mc = startDbUpdate();
            pqxx::work txn(*conn);
            stepNr = createBuildStep(txn, result.startTime, buildId, step, machine->storeUri.render(), bsBusy);
            txn.commit();
        }

        auto updateStep = [&](StepState stepState) {
            pqxx::work txn(*conn);
            updateBuildStep(txn, buildId, stepNr, stepState);
            txn.commit();
        };

        /* Do the build. */
        NarMemberDatas narMembers;

        try {
            /* FIXME: referring builds may have conflicting timeouts. */
            buildRemote(destStore, std::move(reservation), machine, step, buildOptions, result, activeStep, updateStep, narMembers);
        } catch (Error & e) {
            if (activeStep->state_.lock()->cancelled) {
                printInfo("marking step %d of build %d as cancelled", stepNr, buildId);
                result.stepStatus = bsCancelled;
                result.canRetry = false;
            } else {
                result.stepStatus = bsAborted;
                result.errorMsg = e.msg();
                result.canRetry = true;
            }
        }

        if (result.stepStatus == bsSuccess) {
            updateStep(ssPostProcessing);
            res = getBuildOutput(destStore, narMembers, destStore->queryDerivationOutputMap(step->drvPath, &*localStore));
        }
    }

    time_t stepStopTime = time(0);
    if (!result.stopTime) result.stopTime = stepStopTime;

    /* For standard failures, we don't care about the error
       message. */
    if (result.stepStatus != bsAborted)
        result.errorMsg = "";

    /* Account the time we spent building this step by dividing it
       among the jobsets that depend on it. */
    {
        auto step_(step->state.lock());
        if (!step_->jobsets.empty()) {
            // FIXME: loss of precision.
            time_t charge = (result.stopTime - result.startTime) / step_->jobsets.size();
            for (auto & jobset : step_->jobsets)
                jobset->addStep(result.startTime, charge);
        }
    }

    /* Finish the step in the database. */
    if (stepNr) {
        pqxx::work txn(*conn);
        finishBuildStep(txn, result, buildId, stepNr, machine->storeUri.render());
        txn.commit();
    }

    /* The step had a hopefully temporary failure (e.g. network
       issue). Retry a number of times. */
    if (result.canRetry) {
        printMsg(lvlError, "possibly transient failure building ‘%s’ on ‘%s’: %s",
            localStore->printStorePath(step->drvPath), machine->storeUri.render(), result.errorMsg);
        assert(stepNr);
        bool retry;
        {
            auto step_(step->state.lock());
            retry = step_->tries + 1 < maxTries;
        }
        if (retry) {
            auto mc = startDbUpdate();
            stepFinished = true;
            if (buildOneDone) exit(1);
            return sRetry;
        }
    }

    if (result.stepStatus == bsSuccess) {

        assert(stepNr);

        for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(step->drvPath, &*localStore)) {
            if (!optOutputPath)
                throw Error(
                    "Missing output %s for derivation %s which was supposed to have succeeded",
                    outputName, localStore->printStorePath(step->drvPath));
            addRoot(*optOutputPath);
        }

        /* Register success in the database for all Build objects that
           have this step as the top-level step. Since the queue
           monitor thread may be creating new referring Builds
           concurrently, and updating the database may fail, we do
           this in a loop, marking all known builds, repeating until
           there are no unmarked builds.
        */

        std::vector<BuildID> buildIDs;

        while (true) {

            /* Get the builds that have this one as the top-level. */
            std::vector<Build::ptr> direct;
            {
                auto steps_(steps.lock());
                auto step_(step->state.lock());

                for (auto & b_ : step_->builds) {
                    auto b = b_.lock();
                    if (b && !b->finishedInDB) direct.push_back(b);
                }

                /* If there are no builds left to update in the DB,
                   then we're done (except for calling
                   finishBuildStep()). Delete the step from
                   ‘steps’. Since we've been holding the ‘steps’ lock,
                   no new referrers can have been added in the
                   meantime or be added afterwards. */
                if (direct.empty()) {
                    printMsg(lvlDebug, "finishing build step ‘%s’",
                        localStore->printStorePath(step->drvPath));
                    steps_->erase(step->drvPath);
                }
            }

            /* Update the database. */
            {
                auto mc = startDbUpdate();

                pqxx::work txn(*conn);

                for (auto & b : direct) {
                    printInfo("marking build %1% as succeeded", b->id);
                    markSucceededBuild(txn, b, res, buildId != b->id || result.isCached,
                        result.startTime, result.stopTime);
                }

                txn.commit();
            }

            stepFinished = true;

            if (direct.empty()) break;

            /* Remove the direct dependencies from ‘builds’. This will
               cause them to be destroyed. */
            for (auto & b : direct) {
                auto builds_(builds.lock());
                b->finishedInDB = true;
                builds_->erase(b->id);
                buildIDs.push_back(b->id);
            }
        }

        /* Send notification about the builds that have this step as
           the top-level. */
        {
            pqxx::work txn(*conn);
            for (auto id : buildIDs)
                notifyBuildFinished(txn, id, {});
            txn.commit();
        }

        /* Wake up any dependent steps that have no other
           dependencies. */
        {
            auto step_(step->state.lock());
            for (auto & rdepWeak : step_->rdeps) {
                auto rdep = rdepWeak.lock();
                if (!rdep) continue;

                bool runnable = false;
                {
                    auto rdep_(rdep->state.lock());
                    rdep_->deps.erase(step);
                    /* Note: if the step has not finished
                       initialisation yet, it will be made runnable in
                       createStep(), if appropriate. */
                    if (rdep_->deps.empty() && rdep_->created) runnable = true;
                }

                if (runnable) makeRunnable(rdep);
            }
        }

    } else
        failStep(*conn, step, buildId, result, machine, stepFinished);

    // FIXME: keep stats about aborted steps?
    nrStepsDone++;
    totalStepTime += stepStopTime - stepStartTime;
    totalStepBuildTime += result.stopTime - result.startTime;
    machine->state->nrStepsDone++;
    machine->state->totalStepTime += stepStopTime - stepStartTime;
    machine->state->totalStepBuildTime += result.stopTime - result.startTime;

    if (buildOneDone) exit(0); // testing hack; FIXME: this won't run plugins

    return sDone;
}


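// (Sketch, not part of the original source.) The fair-share accounting
// in doBuildStep() above splits a step's wall-clock time evenly among
// the jobsets that depend on it, so a shared dependency doesn't
// penalise any single jobset: a 300-second step needed by three
// jobsets charges 100 s to each. As the FIXME notes, the integer
// division loses at most nJobsets - 1 seconds per step.
static time_t chargePerJobset(time_t startTime, time_t stopTime, size_t nJobsets)
{
    return nJobsets == 0 ? 0 : (stopTime - startTime) / nJobsets;
}

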
void State::failStep(
    Connection & conn,
    Step::ptr step,
    BuildID buildId,
    const RemoteResult & result,
    ::Machine::ptr machine,
    bool & stepFinished)
{
    /* Register failure in the database for all Build objects that
       directly or indirectly depend on this step. */

    std::vector<BuildID> dependentIDs;

    while (true) {
        /* Get the builds and steps that depend on this step. */
        std::set<Build::ptr> indirect;
        {
            auto steps_(steps.lock());
            std::set<Step::ptr> steps;
            getDependents(step, indirect, steps);

            /* If there are no builds left, delete all referring
               steps from ‘steps’. As for the success case, we can
               be certain no new referrers can be added. */
            if (indirect.empty()) {
                for (auto & s : steps) {
                    printMsg(lvlDebug, "finishing build step ‘%s’",
                        localStore->printStorePath(s->drvPath));
                    steps_->erase(s->drvPath);
                }
            }
        }

        if (indirect.empty() && stepFinished) break;

        /* Update the database. */
        {
            auto mc = startDbUpdate();

            pqxx::work txn(conn);

            /* Create failed build steps for every build that
               depends on this, except when this step is cached
               and is the top-level of that build (since then it's
               redundant with the build's isCachedBuild field). */
            for (auto & build : indirect) {
                if ((result.stepStatus == bsCachedFailure && build->drvPath == step->drvPath) ||
                    ((result.stepStatus != bsCachedFailure && result.stepStatus != bsUnsupported) && buildId == build->id) ||
                    build->finishedInDB)
                    continue;
                createBuildStep(txn,
                    0, build->id, step, machine ? machine->storeUri.render() : "",
                    result.stepStatus, result.errorMsg, buildId == build->id ? 0 : buildId);
            }

            /* Mark all builds that depend on this derivation as failed. */
            for (auto & build : indirect) {
                if (build->finishedInDB) continue;
                printError("marking build %1% as failed", build->id);
                txn.exec("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
                    pqxx::params{build->id,
                        (int) (build->drvPath != step->drvPath && result.buildStatus() == bsFailed ? bsDepFailed : result.buildStatus()),
                        result.startTime,
                        result.stopTime,
                        result.stepStatus == bsCachedFailure ? 1 : 0}).no_rows();
                nrBuildsDone++;
            }

            /* Remember failed paths in the database so that they
               won't be built again. */
            if (result.stepStatus != bsCachedFailure && result.canCache)
                for (auto & i : step->drv->outputsAndOptPaths(*localStore))
                    if (i.second.second)
                        txn.exec("insert into FailedPaths values ($1)", pqxx::params{localStore->printStorePath(*i.second.second)}).no_rows();

            txn.commit();
        }

        stepFinished = true;

        /* Remove the indirect dependencies from ‘builds’. This
           will cause them to be destroyed. */
        for (auto & b : indirect) {
            auto builds_(builds.lock());
            b->finishedInDB = true;
            builds_->erase(b->id);
            dependentIDs.push_back(b->id);
            if (!buildOneDone && buildOne == b->id) buildOneDone = true;
        }
    }

    /* Send notification about this build and its dependents. */
    {
        pqxx::work txn(conn);
        notifyBuildFinished(txn, buildId, dependentIDs);
        txn.commit();
    }
}


void State::addRoot(const StorePath & storePath)
{
    auto root = rootsDir + "/" + std::string(storePath.to_string());
    if (!pathExists(root)) writeFile(root, "");
}
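/* (Illustrative note, not part of the original source.) With the
   default gc_roots_dir configured in State::State() further below,
   addRoot() creates an empty file along the lines of

       /nix/var/nix/gcroots/per-user/$LOGNAME/hydra-roots/<hash>-<name>

   (the exact layout depends on nixStateDir and the configured roots
   directory); its presence keeps the freshly built store path from
   being garbage-collected. */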
@@ -1,478 +0,0 @@
#include <algorithm>
#include <cmath>
#include <thread>
#include <unordered_map>
#include <unordered_set>

#include "state.hh"

using namespace nix;


void State::makeRunnable(Step::ptr step)
{
    printMsg(lvlChatty, "step ‘%s’ is now runnable", localStore->printStorePath(step->drvPath));

    {
        auto step_(step->state.lock());
        assert(step_->created);
        assert(!step->finished);
        assert(step_->deps.empty());
        step_->runnableSince = std::chrono::system_clock::now();
    }

    {
        auto runnable_(runnable.lock());
        runnable_->push_back(step);
    }

    wakeDispatcher();
}


void State::dispatcher()
{
    printMsg(lvlDebug, "waiting for the machines file to be parsed at least once");
    machinesReadyLock.lock();

    while (true) {
        try {
            printMsg(lvlDebug, "dispatcher woken up");
            nrDispatcherWakeups++;

            auto t_before_work = std::chrono::steady_clock::now();

            auto sleepUntil = doDispatch();

            auto t_after_work = std::chrono::steady_clock::now();

            prom.dispatcher_time_spent_running.Increment(
                std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());
            dispatchTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(t_after_work - t_before_work).count();

            /* Sleep until we're woken up (either because a runnable build
               is added, or because a build finishes). */
            {
                auto dispatcherWakeup_(dispatcherWakeup.lock());
                if (!*dispatcherWakeup_) {
                    debug("dispatcher sleeping for %1%s",
                        std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count());
                    dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil);
                }
                *dispatcherWakeup_ = false;
            }

            auto t_after_sleep = std::chrono::steady_clock::now();
            prom.dispatcher_time_spent_waiting.Increment(
                std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());

        } catch (std::exception & e) {
            printError("dispatcher: %s", e.what());
            sleep(1);
        }

    }

    printMsg(lvlError, "dispatcher exits");
}
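/* (Minimal sketch, not part of the original source.) The wakeup
   handshake above is the classic flag-plus-condition-variable pattern,
   here wrapped in Nix's Sync type; with plain std::mutex and
   std::condition_variable it looks roughly like this: */
#include <chrono>
#include <condition_variable>
#include <mutex>

struct Wakeup
{
    std::mutex m;
    std::condition_variable cv;
    bool woken = false;

    void notify()
    {
        { std::lock_guard<std::mutex> lk(m); woken = true; }
        cv.notify_one();
    }

    template<typename Clock, typename Dur>
    void sleepUntil(std::chrono::time_point<Clock, Dur> deadline)
    {
        std::unique_lock<std::mutex> lk(m);
        if (!woken) cv.wait_until(lk, deadline);
        woken = false; // consume the wakeup, as the dispatcher does
    }
};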


system_time State::doDispatch()
{
    /* Prune old historical build step info from the jobsets. */
    {
        auto jobsets_(jobsets.lock());
        for (auto & jobset : *jobsets_) {
            auto s1 = jobset.second->shareUsed();
            jobset.second->pruneSteps();
            auto s2 = jobset.second->shareUsed();
            if (s1 != s2)
                debug("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%",
                    jobset.first.first, jobset.first.second, s1, s2);
        }
    }

    system_time now = std::chrono::system_clock::now();

    /* Start steps until we're out of steps or slots. */
    auto sleepUntil = system_time::max();
    bool keepGoing;

    /* Sort the runnable steps by priority. Priority is established
       as follows (in order of precedence):

       - The global priority of the builds that depend on the
         step. This allows admins to bump a build to the front of
         the queue.

       - The lowest used scheduling share of the jobsets depending
         on the step.

       - The local priority of the build, as set via the build's
         meta.schedulingPriority field. Note that this is not
         quite correct: the local priority should only be used to
         establish priority between builds in the same jobset, but
         here it's used between steps in different jobsets if they
         happen to have the same lowest used scheduling share. But
         that's not very likely.

       - The lowest ID of the builds depending on the step;
         i.e. older builds take priority over new ones.

       FIXME: O(n lg n); obviously, it would be better to keep a
       runnable queue sorted by priority. */
    struct StepInfo
    {
        Step::ptr step;
        bool alreadyScheduled = false;

        /* The lowest share used of any jobset depending on this
           step. */
        double lowestShareUsed = 1e9;

        /* Info copied from step->state to ensure that the
           comparator is a strict weak ordering (see MachineInfo). */
        int highestGlobalPriority;
        int highestLocalPriority;
        size_t numRequiredSystemFeatures;
        size_t numRevDeps;
        BuildID lowestBuildID;

        StepInfo(Step::ptr step, Step::State & step_) : step(step)
        {
            for (auto & jobset : step_.jobsets)
                lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
            highestGlobalPriority = step_.highestGlobalPriority;
            highestLocalPriority = step_.highestLocalPriority;
            numRequiredSystemFeatures = step->requiredSystemFeatures.size();
            numRevDeps = step_.rdeps.size();
            lowestBuildID = step_.lowestBuildID;
        }
    };

    std::vector<StepInfo> runnableSorted;

    struct RunnablePerType
    {
        unsigned int count{0};
        std::chrono::seconds waitTime{0};
    };

    std::unordered_map<std::string, RunnablePerType> runnablePerType;

    {
        auto runnable_(runnable.lock());
        runnableSorted.reserve(runnable_->size());
        for (auto i = runnable_->begin(); i != runnable_->end(); ) {
            auto step = i->lock();

            /* Remove dead steps. */
            if (!step) {
                i = runnable_->erase(i);
                continue;
            }

            ++i;

            auto & r = runnablePerType[step->systemType];
            r.count++;

            /* Skip previously failed steps that aren't ready
               to be retried. */
            auto step_(step->state.lock());
            r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
            if (step_->tries > 0 && step_->after > now) {
                if (step_->after < sleepUntil)
                    sleepUntil = step_->after;
                continue;
            }

            runnableSorted.emplace_back(step, *step_);
        }
    }

    sort(runnableSorted.begin(), runnableSorted.end(),
        [](const StepInfo & a, const StepInfo & b)
        {
            return
                a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
                a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
                a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
                a.numRequiredSystemFeatures != b.numRequiredSystemFeatures ? a.numRequiredSystemFeatures > b.numRequiredSystemFeatures :
                a.numRevDeps != b.numRevDeps ? a.numRevDeps > b.numRevDeps :
                a.lowestBuildID < b.lowestBuildID;
        });

    do {
        now = std::chrono::system_clock::now();

        /* Copy the currentJobs field of each machine. This is
           necessary to ensure that the sort comparator below is
           an ordering. std::sort() can segfault if it isn't. Also
           filter out temporarily disabled machines. */
        struct MachineInfo
        {
            ::Machine::ptr machine;
            unsigned long currentJobs;
        };
        std::vector<MachineInfo> machinesSorted;
        {
            auto machines_(machines.lock());
            for (auto & m : *machines_) {
                auto info(m.second->state->connectInfo.lock());
                if (!m.second->enabled) continue;
                if (info->consecutiveFailures && info->disabledUntil > now) {
                    if (info->disabledUntil < sleepUntil)
                        sleepUntil = info->disabledUntil;
                    continue;
                }
                machinesSorted.push_back({m.second, m.second->state->currentJobs});
            }
        }

        /* Sort the machines by a combination of speed factor and
           available slots. Prioritise the available machines as
           follows:

           - First by load divided by speed factor, rounded to the
             nearest integer. This causes fast machines to be
             preferred over slow machines with similar loads.

           - Then by speed factor.

           - Finally by load. */
        sort(machinesSorted.begin(), machinesSorted.end(),
            [](const MachineInfo & a, const MachineInfo & b) -> bool
            {
                float ta = std::round(a.currentJobs / a.machine->speedFactor);
                float tb = std::round(b.currentJobs / b.machine->speedFactor);
                return
                    ta != tb ? ta < tb :
                    a.machine->speedFactor != b.machine->speedFactor ? a.machine->speedFactor > b.machine->speedFactor :
                    a.currentJobs > b.currentJobs;
            });

        /* Find a machine with a free slot and find a step to run
           on it. Once we find such a pair, we restart the outer
           loop because the machine sorting will have changed. */
        keepGoing = false;

        for (auto & mi : machinesSorted) {
            if (mi.machine->state->currentJobs >= mi.machine->maxJobs) continue;

            for (auto & stepInfo : runnableSorted) {
                if (stepInfo.alreadyScheduled) continue;

                auto & step(stepInfo.step);

                /* Can this machine do this step? */
                if (!mi.machine->supportsStep(step)) {
                    debug("machine '%s' does not support step '%s' (system type '%s')",
                        mi.machine->storeUri.render(), localStore->printStorePath(step->drvPath), step->drv->platform);
                    continue;
                }

                /* Let's do this step. Remove it from the runnable
                   list. FIXME: O(n). */
                {
                    auto runnable_(runnable.lock());
                    bool removed = false;
                    for (auto i = runnable_->begin(); i != runnable_->end(); )
                        if (i->lock() == step) {
                            i = runnable_->erase(i);
                            removed = true;
                            break;
                        } else ++i;
                    assert(removed);
                    auto & r = runnablePerType[step->systemType];
                    assert(r.count);
                    r.count--;
                }

                stepInfo.alreadyScheduled = true;

                /* Make a slot reservation and start a thread to
                   do the build. */
                auto builderThread = std::thread(&State::builder, this,
                    std::make_unique<MachineReservation>(*this, step, mi.machine));
                builderThread.detach(); // FIXME?

                keepGoing = true;
                break;
            }

            if (keepGoing) break;
        }

        /* Update the stats for the auto-scaler. */
        {
            auto machineTypes_(machineTypes.lock());

            for (auto & i : *machineTypes_)
                i.second.runnable = 0;

            for (auto & i : runnablePerType) {
                auto & j = (*machineTypes_)[i.first];
                j.runnable = i.second.count;
                j.waitTime = i.second.waitTime;
            }
        }

        lastDispatcherCheck = std::chrono::system_clock::to_time_t(now);

    } while (keepGoing);

    abortUnsupported();

    return sleepUntil;
}


void State::wakeDispatcher()
{
    {
        auto dispatcherWakeup_(dispatcherWakeup.lock());
        *dispatcherWakeup_ = true;
    }
    dispatcherWakeupCV.notify_one();
}


void State::abortUnsupported()
{
    /* Make a copy of 'runnable' and 'machines' so we don't block them
       very long. */
    auto runnable2 = *runnable.lock();
    auto machines2 = *machines.lock();

    system_time now = std::chrono::system_clock::now();
    auto now2 = time(0);

    std::unordered_set<Step::ptr> aborted;

    size_t count = 0;

    for (auto & wstep : runnable2) {
        auto step(wstep.lock());
        if (!step) continue;

        bool supported = false;
        for (auto & machine : machines2) {
            if (machine.second->supportsStep(step)) {
                step->state.lock()->lastSupported = now;
                supported = true;
                break;
            }
        }

        if (!supported)
            count++;

        if (!supported
            && std::chrono::duration_cast<std::chrono::seconds>(now - step->state.lock()->lastSupported).count() >= maxUnsupportedTime)
        {
            printError("aborting unsupported build step '%s' (type '%s')",
                localStore->printStorePath(step->drvPath),
                step->systemType);

            aborted.insert(step);

            auto conn(dbPool.get());

            std::set<Build::ptr> dependents;
            std::set<Step::ptr> steps;
            getDependents(step, dependents, steps);

            /* Maybe the step got cancelled. */
            if (dependents.empty()) continue;

            /* Find the build that has this step as the top-level (if
               any). */
            Build::ptr build;
            for (auto build2 : dependents) {
                if (build2->drvPath == step->drvPath)
                    build = build2;
            }
            if (!build) build = *dependents.begin();

            bool stepFinished = false;

            failStep(
                *conn, step, build->id,
                RemoteResult {
                    .stepStatus = bsUnsupported,
                    .errorMsg = fmt("unsupported system type '%s'",
                        step->systemType),
                    .startTime = now2,
                    .stopTime = now2,
                },
                nullptr, stepFinished);

            if (buildOneDone) exit(1);
        }
    }

    /* Clean up 'runnable'. */
    {
        auto runnable_(runnable.lock());
        for (auto i = runnable_->begin(); i != runnable_->end(); ) {
            if (aborted.count(i->lock()))
                i = runnable_->erase(i);
            else
                ++i;
        }
    }

    nrUnsupportedSteps = count;
}


void Jobset::addStep(time_t startTime, time_t duration)
{
    auto steps_(steps.lock());
    (*steps_)[startTime] = duration;
    seconds += duration;
}


void Jobset::pruneSteps()
{
    time_t now = time(0);
    auto steps_(steps.lock());
    while (!steps_->empty()) {
        auto i = steps_->begin();
        if (i->first > now - schedulingWindow) break;
        seconds -= i->second;
        steps_->erase(i);
    }
}
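/* (Illustrative note, not part of the original source.) addStep() and
   pruneSteps() together maintain a sliding-window sum: `seconds` is
   the build time charged to the jobset within the last
   `schedulingWindow` seconds, keyed by step start time. The
   dispatcher's shareUsed() ranking above consumes this sum (presumably
   relative to the jobset's configured shares), so a jobset's priority
   recovers automatically as its past usage ages out of the window. */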


State::MachineReservation::MachineReservation(State & state, Step::ptr step, ::Machine::ptr machine)
    : state(state), step(step), machine(machine)
{
    machine->state->currentJobs++;

    {
        auto machineTypes_(state.machineTypes.lock());
        (*machineTypes_)[step->systemType].running++;
    }
}


State::MachineReservation::~MachineReservation()
{
    auto prev = machine->state->currentJobs--;
    assert(prev);
    if (prev == 1)
        machine->state->idleSince = time(0);

    {
        auto machineTypes_(state.machineTypes.lock());
        auto & machineType = (*machineTypes_)[step->systemType];
        assert(machineType.running);
        machineType.running--;
        if (machineType.running == 0)
            machineType.lastActive = std::chrono::system_clock::now();
    }
}
@@ -1,47 +0,0 @@
#pragma once

#include <memory>

#include <nix/util/hash.hh>
#include <nix/store/derivations.hh>
#include <nix/store/store-api.hh>
#include "nar-extractor.hh"

struct BuildProduct
{
    nix::Path path, defaultPath;
    std::string type, subtype, name;
    bool isRegular = false;
    std::optional<nix::Hash> sha256hash;
    std::optional<off_t> fileSize;
    BuildProduct() { }
};

struct BuildMetric
{
    std::string name, unit;
    double value;
};

struct BuildOutput
{
    /* Whether this build has failed with output, i.e., the build
       finished with exit code 0 but produced a file
       $out/nix-support/failed. */
    bool failed = false;

    std::string releaseName;

    uint64_t closureSize = 0, size = 0;

    std::list<BuildProduct> products;

    std::map<std::string, nix::StorePath> outputs;

    std::map<std::string, BuildMetric> metrics;
};

BuildOutput getBuildOutput(
    nix::ref<nix::Store> store,
    NarMemberDatas & narMembers,
    const nix::OutputPathMap derivationOutputs);
@@ -1,968 +0,0 @@
#include <iostream>
#include <thread>
#include <optional>
#include <type_traits>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include <prometheus/exposer.h>

#include <nlohmann/json.hpp>

#include <nix/util/signals.hh>
#include "state.hh"
#include "hydra-build-result.hh"
#include <nix/store/store-open.hh>
#include <nix/store/remote-store.hh>

#include <nix/store/globals.hh>
#include "hydra-config.hh"
#include <nix/store/s3-binary-cache-store.hh>
#include <nix/main/shared.hh>

using namespace nix;
using nlohmann::json;


std::string getEnvOrDie(const std::string & key)
{
    auto value = getEnv(key);
    if (!value) throw Error("environment variable '%s' is not set", key);
    return *value;
}

State::PromMetrics::PromMetrics()
    : registry(std::make_shared<prometheus::Registry>())
    , queue_checks_started(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_checks_started_total")
            .Help("Number of times State::getQueuedBuilds() was started")
            .Register(*registry)
            .Add({})
    )
    , queue_build_loads(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_build_loads_total")
            .Help("Number of builds loaded")
            .Register(*registry)
            .Add({})
    )
    , queue_steps_created(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_steps_created_total")
            .Help("Number of steps created")
            .Register(*registry)
            .Add({})
    )
    , queue_checks_early_exits(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_checks_early_exits_total")
            .Help("Number of times State::getQueuedBuilds() yielded to potential bumps")
            .Register(*registry)
            .Add({})
    )
    , queue_checks_finished(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_checks_finished_total")
            .Help("Number of times State::getQueuedBuilds() was completed")
            .Register(*registry)
            .Add({})
    )
    , dispatcher_time_spent_running(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_dispatcher_time_spent_running")
            .Help("Time (in micros) spent running the dispatcher")
            .Register(*registry)
            .Add({})
    )
    , dispatcher_time_spent_waiting(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_dispatcher_time_spent_waiting")
            .Help("Time (in micros) spent waiting for the dispatcher to obtain work")
            .Register(*registry)
            .Add({})
    )
    , queue_monitor_time_spent_running(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_monitor_time_spent_running")
            .Help("Time (in micros) spent running the queue monitor")
            .Register(*registry)
            .Add({})
    )
    , queue_monitor_time_spent_waiting(
        prometheus::BuildCounter()
            .Name("hydraqueuerunner_queue_monitor_time_spent_waiting")
            .Help("Time (in micros) spent waiting for the queue monitor to obtain work")
            .Register(*registry)
            .Add({})
    )
{
}
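/* (Minimal sketch, not part of the original source.) The registry
   built above is served over HTTP with prometheus-cpp's Exposer; the
   queue runner binds it to `metricsAddr` elsewhere, roughly like
   this: */
#include <memory>
#include <string>
#include <prometheus/exposer.h>
#include <prometheus/registry.h>

void serveMetricsExample(std::shared_ptr<prometheus::Registry> registry,
    const std::string & metricsAddr)
{
    prometheus::Exposer exposer{metricsAddr}; // e.g. "127.0.0.1:9198"
    exposer.RegisterCollectable(registry);    // scraped at http://<addr>/metrics
    // ... keep the exposer alive for as long as metrics should be served
}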

State::State(std::optional<std::string> metricsAddrOpt)
    : config(std::make_unique<HydraConfig>())
    , maxUnsupportedTime(config->getIntOption("max_unsupported_time", 0))
    , dbPool(config->getIntOption("max_db_connections", 128))
    , localWorkThrottler(config->getIntOption("max_local_worker_threads", std::min(maxSupportedLocalWorkers, std::max(4u, std::thread::hardware_concurrency()) - 2)))
    , maxOutputSize(config->getIntOption("max_output_size", 2ULL << 30))
    , maxLogSize(config->getIntOption("max_log_size", 64ULL << 20))
    , uploadLogsToBinaryCache(config->getBoolOption("upload_logs_to_binary_cache", false))
    , rootsDir(config->getStrOption("gc_roots_dir", fmt("%s/gcroots/per-user/%s/hydra-roots", settings.nixStateDir, getEnvOrDie("LOGNAME"))))
    , metricsAddr(config->getStrOption("queue_runner_metrics_address", std::string{"127.0.0.1:9198"}))
{
    hydraData = getEnvOrDie("HYDRA_DATA");

    logDir = canonPath(hydraData + "/build-logs");

    if (metricsAddrOpt.has_value()) {
        metricsAddr = metricsAddrOpt.value();
    }

    /* handle deprecated store specification */
    if (config->getStrOption("store_mode") != "")
        throw Error("store_mode in hydra.conf is deprecated, please use store_uri");
    if (config->getStrOption("binary_cache_dir") != "")
        printMsg(lvlError, "hydra.conf: binary_cache_dir is deprecated and ignored. use store_uri=file:// instead");
    if (config->getStrOption("binary_cache_s3_bucket") != "")
        printMsg(lvlError, "hydra.conf: binary_cache_s3_bucket is deprecated and ignored. use store_uri=s3:// instead");
    if (config->getStrOption("binary_cache_secret_key_file") != "")
        printMsg(lvlError, "hydra.conf: binary_cache_secret_key_file is deprecated and ignored. use store_uri=...?secret-key= instead");

    createDirs(rootsDir);
}


nix::MaintainCount<counter> State::startDbUpdate()
{
    if (nrActiveDbUpdates > 6)
        printError("warning: %d concurrent database updates; PostgreSQL may be stalled", nrActiveDbUpdates.load());
    return MaintainCount<counter>(nrActiveDbUpdates);
}


ref<Store> State::getDestStore()
{
    return ref<Store>(_destStore);
}


void State::parseMachines(const std::string & contents)
{
    Machines newMachines, oldMachines;
    {
        auto machines_(machines.lock());
        oldMachines = *machines_;
    }

    for (auto && machine_ : nix::Machine::parseConfig({}, contents)) {
        auto machine = std::make_shared<::Machine>(std::move(machine_));

        /* Re-use the State object of the previous machine with the
           same name. */
        auto i = oldMachines.find(machine->storeUri.variant);
        if (i == oldMachines.end())
            printMsg(lvlChatty, "adding new machine ‘%1%’", machine->storeUri.render());
        else
            printMsg(lvlChatty, "updating machine ‘%1%’", machine->storeUri.render());
        machine->state = i == oldMachines.end()
            ? std::make_shared<::Machine::State>()
            : i->second->state;
        newMachines[machine->storeUri.variant] = machine;
    }

    for (auto & m : oldMachines)
        if (newMachines.find(m.first) == newMachines.end()) {
            if (m.second->enabled)
                printInfo("removing machine ‘%1%’", m.second->storeUri.render());
            /* Add a disabled ::Machine object to make sure stats are
               maintained. */
            auto machine = std::make_shared<::Machine>(*(m.second));
            machine->enabled = false;
            newMachines[m.first] = machine;
        }

    static bool warned = false;
    if (newMachines.empty() && !warned) {
        printError("warning: no build machines are defined");
        warned = true;
    }

    auto machines_(machines.lock());
    *machines_ = newMachines;

    wakeDispatcher();
}
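/* (Example, not part of the original source.) parseMachines() consumes
   lines in the standard Nix machines format:

       <store-uri> <system-types> <ssh-key> <max-jobs> <speed-factor>
           <supported-features> <mandatory-features> <public-host-key>

   with "-" for unused fields, e.g. (hypothetical host):

       ssh://builder@example.org x86_64-linux /var/lib/hydra/id_builder 8 2 kvm,big-parallel - -

   monitorMachinesFile() below falls back to a generated
   "localhost ..." line of the same shape when no machines file is
   configured. */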


void State::monitorMachinesFile()
{
    std::string defaultMachinesFile = "/etc/nix/machines";
    auto machinesFiles = tokenizeString<std::vector<Path>>(
        getEnv("NIX_REMOTE_SYSTEMS").value_or(pathExists(defaultMachinesFile) ? defaultMachinesFile : ""), ":");

    if (machinesFiles.empty()) {
        parseMachines("localhost " +
            (settings.thisSystem == "x86_64-linux" ? "x86_64-linux,i686-linux" : settings.thisSystem.get())
            + " - " + std::to_string(settings.maxBuildJobs) + " 1 "
            + concatStringsSep(",", StoreConfig::getDefaultSystemFeatures()));
        machinesReadyLock.unlock();
        return;
    }

    std::vector<struct stat> fileStats;
    fileStats.resize(machinesFiles.size());
    for (unsigned int n = 0; n < machinesFiles.size(); ++n) {
        auto & st(fileStats[n]);
        st.st_ino = st.st_mtime = 0;
    }

    auto readMachinesFiles = [&]() {

        /* Check if any of the machines files changed. */
        bool anyChanged = false;
        for (unsigned int n = 0; n < machinesFiles.size(); ++n) {
            Path machinesFile = machinesFiles[n];
            struct stat st;
            if (stat(machinesFile.c_str(), &st) != 0) {
                if (errno != ENOENT)
                    throw SysError("getting stats about ‘%s’", machinesFile);
                st.st_ino = st.st_mtime = 0;
            }
            auto & old(fileStats[n]);
            if (old.st_ino != st.st_ino || old.st_mtime != st.st_mtime)
                anyChanged = true;
            old = st;
        }

        if (!anyChanged) return;

        debug("reloading machines files");

        std::string contents;
        for (auto & machinesFile : machinesFiles) {
            try {
                contents += readFile(machinesFile);
                contents += '\n';
            } catch (SysError & e) {
                if (e.errNo != ENOENT) throw;
            }
        }

        parseMachines(contents);
    };

    auto firstParse = true;

    while (true) {
        try {
            readMachinesFiles();
            if (firstParse) {
                machinesReadyLock.unlock();
                firstParse = false;
            }
            // FIXME: use inotify.
            sleep(30);
        } catch (std::exception & e) {
            printMsg(lvlError, "reloading machines file: %s", e.what());
            sleep(5);
        }
    }
}


void State::clearBusy(Connection & conn, time_t stopTime)
{
    pqxx::work txn(conn);
    txn.exec("update BuildSteps set busy = 0, status = $1, stopTime = $2 where busy != 0",
        pqxx::params{(int) bsAborted,
            stopTime != 0 ? std::make_optional(stopTime) : std::nullopt}).no_rows();
    txn.commit();
}


unsigned int State::allocBuildStep(pqxx::work & txn, BuildID buildId)
{
    auto res = txn.exec("select max(stepnr) from BuildSteps where build = $1", buildId).one_row();
    return res[0].is_null() ? 1 : res[0].as<int>() + 1;
}


unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID buildId, Step::ptr step,
    const std::string & machine, BuildStatus status, const std::string & errorMsg, BuildID propagatedFrom)
{
restart:
    auto stepNr = allocBuildStep(txn, buildId);

    auto r = txn.exec("insert into BuildSteps (build, stepnr, type, drvPath, busy, startTime, system, status, propagatedFrom, errorMsg, stopTime, machine) values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) on conflict do nothing",
        pqxx::params{buildId,
            stepNr,
            0, // == build
            localStore->printStorePath(step->drvPath),
            status == bsBusy ? 1 : 0,
            startTime != 0 ? std::make_optional(startTime) : std::nullopt,
            step->drv->platform,
            status != bsBusy ? std::make_optional((int) status) : std::nullopt,
            propagatedFrom != 0 ? std::make_optional(propagatedFrom) : std::nullopt, // internal::params
            errorMsg != "" ? std::make_optional(errorMsg) : std::nullopt,
            startTime != 0 && status != bsBusy ? std::make_optional(startTime) : std::nullopt,
            machine});

    if (r.affected_rows() == 0) goto restart;

    for (auto & [name, output] : getDestStore()->queryPartialDerivationOutputMap(step->drvPath, &*localStore))
        txn.exec("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
            pqxx::params{buildId, stepNr, name,
                output
                    ? std::optional{localStore->printStorePath(*output)}
                    : std::nullopt}).no_rows();

    if (status == bsBusy)
        txn.exec(fmt("notify step_started, '%d\t%d'", buildId, stepNr));

    return stepNr;
}
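/* (Explanatory note, not part of the original source.) The
   `goto restart` above implements optimistic step-number allocation:
   allocBuildStep() picks max(stepnr) + 1 without any locking, and the
   insert's `on conflict do nothing` turns a lost race with a
   concurrent inserter into zero affected rows, after which the number
   is simply recomputed. This relies on a uniqueness constraint over
   (build, stepnr) in BuildSteps (an assumption about the schema). */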
|
||||
|
||||
void State::updateBuildStep(pqxx::work & txn, BuildID buildId, unsigned int stepNr, StepState stepState)
|
||||
{
|
||||
if (txn.exec("update BuildSteps set busy = $1 where build = $2 and stepnr = $3 and busy != 0 and status is null",
|
||||
pqxx::params{(int) stepState,
|
||||
buildId,
|
||||
stepNr}).affected_rows() != 1)
|
||||
throw Error("step %d of build %d is in an unexpected state", stepNr, buildId);
|
||||
}
|
||||
|
||||
|
||||
void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
|
||||
BuildID buildId, unsigned int stepNr, const std::string & machine)
|
||||
{
|
||||
assert(result.startTime);
|
||||
assert(result.stopTime);
|
||||
txn.exec("update BuildSteps set busy = 0, status = $1, errorMsg = $4, startTime = $5, stopTime = $6, machine = $7, overhead = $8, timesBuilt = $9, isNonDeterministic = $10 where build = $2 and stepnr = $3",
|
||||
pqxx::params{(int) result.stepStatus, buildId, stepNr,
|
||||
result.errorMsg != "" ? std::make_optional(result.errorMsg) : std::nullopt,
|
||||
result.startTime, result.stopTime,
|
||||
machine != "" ? std::make_optional(machine) : std::nullopt,
|
||||
result.overhead != 0 ? std::make_optional(result.overhead) : std::nullopt,
|
||||
result.timesBuilt > 0 ? std::make_optional(result.timesBuilt) : std::nullopt,
|
||||
result.timesBuilt > 1 ? std::make_optional(result.isNonDeterministic) : std::nullopt}).no_rows();
|
||||
assert(result.logFile.find('\t') == std::string::npos);
|
||||
txn.exec(fmt("notify step_finished, '%d\t%d\t%s'",
|
||||
buildId, stepNr, result.logFile));
|
||||
|
||||
if (result.stepStatus == bsSuccess) {
|
||||
// Update the corresponding `BuildStepOutputs` row to add the output path
|
||||
auto res = txn.exec("select drvPath from BuildSteps where build = $1 and stepnr = $2", pqxx::params{buildId, stepNr}).one_row();
|
||||
assert(res.size());
|
||||
StorePath drvPath = localStore->parseStorePath(res[0].as<std::string>());
|
||||
// If we've finished building, all the paths should be known
|
||||
for (auto & [name, output] : getDestStore()->queryDerivationOutputMap(drvPath, &*localStore))
|
||||
txn.exec("update BuildStepOutputs set path = $4 where build = $1 and stepnr = $2 and name = $3",
|
||||
pqxx::params{buildId, stepNr, name, localStore->printStorePath(output)}).no_rows();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
|
||||
Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath)
|
||||
{
|
||||
restart:
|
||||
auto stepNr = allocBuildStep(txn, build->id);
|
||||
|
||||
auto r = txn.exec("insert into BuildSteps (build, stepnr, type, drvPath, busy, status, startTime, stopTime) values ($1, $2, $3, $4, $5, $6, $7, $8) on conflict do nothing",
|
||||
pqxx::params{build->id,
|
||||
stepNr,
|
||||
1, // == substitution
|
||||
(localStore->printStorePath(drvPath)),
|
||||
0,
|
||||
0,
|
||||
startTime,
|
||||
stopTime});
|
||||
|
||||
if (r.affected_rows() == 0) goto restart;
|
||||
|
||||
txn.exec("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
|
||||
pqxx::params{build->id, stepNr, outputName,
|
||||
localStore->printStorePath(storePath)}).no_rows();
|
||||
|
||||
return stepNr;
|
||||
}
|
||||
|
||||
|
||||
/* Get the steps and unfinished builds that depend on the given step. */
|
||||
void getDependents(Step::ptr step, std::set<Build::ptr> & builds, std::set<Step::ptr> & steps)
|
||||
{
|
||||
std::function<void(Step::ptr)> visit;
|
||||
|
||||
visit = [&](Step::ptr step) {
|
||||
if (steps.count(step)) return;
|
||||
steps.insert(step);
|
||||
|
||||
std::vector<Step::wptr> rdeps;
|
||||
|
||||
{
|
||||
auto step_(step->state.lock());
|
||||
|
||||
for (auto & build : step_->builds) {
|
||||
auto build_ = build.lock();
|
||||
if (build_ && !build_->finishedInDB) builds.insert(build_);
|
||||
}
|
||||
|
||||
/* Make a copy of rdeps so that we don't hold the lock for
|
||||
very long. */
|
||||
rdeps = step_->rdeps;
|
||||
}
|
||||
|
||||
for (auto & rdep : rdeps) {
|
||||
auto rdep_ = rdep.lock();
|
||||
if (rdep_) visit(rdep_);
|
||||
}
|
||||
};
|
||||
|
||||
visit(step);
|
||||
}
|
||||
|
||||
|
||||
void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr start)
|
||||
{
|
||||
std::set<Step::ptr> queued;
|
||||
std::queue<Step::ptr> todo;
|
||||
todo.push(start);
|
||||
|
||||
while (!todo.empty()) {
|
||||
auto step = todo.front();
|
||||
todo.pop();
|
||||
|
||||
visitor(step);
|
||||
|
||||
auto state(step->state.lock());
|
||||
for (auto & dep : state->deps)
|
||||
if (queued.find(dep) == queued.end()) {
|
||||
queued.insert(dep);
|
||||
todo.push(dep);
|
||||
}
|
||||
}
|
||||
}

void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
    const BuildOutput & res, bool isCachedBuild, time_t startTime, time_t stopTime)
{
    if (build->finishedInDB) return;

    if (txn.exec("select 1 from Builds where id = $1 and finished = 0", pqxx::params{build->id}).empty()) return;

    txn.exec("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, size = $5, closureSize = $6, releaseName = $7, isCachedBuild = $8, notificationPendingSince = $4 where id = $1",
        pqxx::params{build->id,
            (int) (res.failed ? bsFailedWithOutput : bsSuccess),
            startTime,
            stopTime,
            res.size,
            res.closureSize,
            res.releaseName != "" ? std::make_optional(res.releaseName) : std::nullopt,
            isCachedBuild ? 1 : 0}).no_rows();

    for (auto & [outputName, outputPath] : res.outputs) {
        txn.exec("update BuildOutputs set path = $3 where build = $1 and name = $2",
            pqxx::params{build->id,
                outputName,
                localStore->printStorePath(outputPath)}
        ).no_rows();
    }

    txn.exec("delete from BuildProducts where build = $1", pqxx::params{build->id}).no_rows();

    unsigned int productNr = 1;
    for (auto & product : res.products) {
        txn.exec("insert into BuildProducts (build, productnr, type, subtype, fileSize, sha256hash, path, name, defaultPath) values ($1, $2, $3, $4, $5, $6, $7, $8, $9)",
            pqxx::params{build->id,
                productNr++,
                product.type,
                product.subtype,
                product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt,
                product.sha256hash ? std::make_optional(product.sha256hash->to_string(HashFormat::Base16, false)) : std::nullopt,
                product.path,
                product.name,
                product.defaultPath}).no_rows();
    }

    txn.exec("delete from BuildMetrics where build = $1", pqxx::params{build->id}).no_rows();

    for (auto & metric : res.metrics) {
        txn.exec("insert into BuildMetrics (build, name, unit, value, project, jobset, job, timestamp) values ($1, $2, $3, $4, $5, $6, $7, $8)",
            pqxx::params{build->id,
                metric.second.name,
                metric.second.unit != "" ? std::make_optional(metric.second.unit) : std::nullopt,
                metric.second.value,
                build->projectName,
                build->jobsetName,
                build->jobName,
                build->timestamp}).no_rows();
    }

    nrBuildsDone++;
}

bool State::checkCachedFailure(Step::ptr step, Connection & conn)
{
    pqxx::work txn(conn);
    for (auto & i : step->drv->outputsAndOptPaths(*localStore))
        if (i.second.second)
            if (!txn.exec("select 1 from FailedPaths where path = $1", pqxx::params{localStore->printStorePath(*i.second.second)}).empty())
                return true;
    return false;
}


void State::notifyBuildStarted(pqxx::work & txn, BuildID buildId)
{
    txn.exec(fmt("notify build_started, '%s'", buildId));
}


void State::notifyBuildFinished(pqxx::work & txn, BuildID buildId,
    const std::vector<BuildID> & dependentIds)
{
    auto payload = fmt("%d", buildId);
    for (auto & d : dependentIds)
        payload += fmt("\t%d", d);
    // FIXME: apparently parameterized() doesn't support NOTIFY.
    txn.exec(fmt("notify build_finished, '%s'", payload));
}
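
The `build_finished` payload is the finished build ID followed by any dependent build IDs, tab-separated. A hedged sketch of how a listener might split that payload back into IDs (the helper name is ours, not Hydra's):

    // Sketch: parse a "id\tid\t..." notify payload back into build IDs.
    #include <sstream>
    #include <string>
    #include <vector>

    using BuildID = unsigned int; // matches the typedef in state.hh

    std::vector<BuildID> parseBuildFinishedPayload(const std::string & payload)
    {
        std::vector<BuildID> ids;
        std::istringstream ss(payload);
        std::string field;
        while (std::getline(ss, field, '\t'))
            ids.push_back(static_cast<BuildID>(std::stoul(field)));
        return ids; // ids[0] is the finished build; the rest are dependents
    }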

std::shared_ptr<PathLocks> State::acquireGlobalLock()
{
    Path lockPath = hydraData + "/queue-runner/lock";

    createDirs(dirOf(lockPath));

    auto lock = std::make_shared<PathLocks>();
    if (!lock->lockPaths(PathSet({lockPath}), "", false)) return 0;

    return lock;
}

void State::dumpStatus(Connection & conn)
{
    time_t now = time(0);
    json statusJson = {
        {"status", "up"},
        {"time", time(0)},
        {"uptime", now - startedAt},
        {"pid", getpid()},

        {"nrQueuedBuilds", builds.lock()->size()},
        {"nrActiveSteps", activeSteps_.lock()->size()},
        {"nrStepsBuilding", nrStepsBuilding.load()},
        {"nrStepsCopyingTo", nrStepsCopyingTo.load()},
        {"nrStepsWaitingForDownloadSlot", nrStepsWaitingForDownloadSlot.load()},
        {"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
        {"nrStepsWaiting", nrStepsWaiting.load()},
        {"nrUnsupportedSteps", nrUnsupportedSteps.load()},
        {"bytesSent", bytesSent.load()},
        {"bytesReceived", bytesReceived.load()},
        {"nrBuildsRead", nrBuildsRead.load()},
        {"buildReadTimeMs", buildReadTimeMs.load()},
        {"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
        {"nrBuildsDone", nrBuildsDone.load()},
        {"nrStepsStarted", nrStepsStarted.load()},
        {"nrStepsDone", nrStepsDone.load()},
        {"nrRetries", nrRetries.load()},
        {"maxNrRetries", maxNrRetries.load()},
        {"nrQueueWakeups", nrQueueWakeups.load()},
        {"nrDispatcherWakeups", nrDispatcherWakeups.load()},
        {"dispatchTimeMs", dispatchTimeMs.load()},
        {"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
        {"nrDbConnections", dbPool.count()},
        {"nrActiveDbUpdates", nrActiveDbUpdates.load()},
    };
    {
        {
            auto steps_(steps.lock());
            for (auto i = steps_->begin(); i != steps_->end(); )
                if (i->second.lock()) ++i; else i = steps_->erase(i);
            statusJson["nrUnfinishedSteps"] = steps_->size();
        }
        {
            auto runnable_(runnable.lock());
            for (auto i = runnable_->begin(); i != runnable_->end(); )
                if (i->lock()) ++i; else i = runnable_->erase(i);
            statusJson["nrRunnableSteps"] = runnable_->size();
        }
        if (nrStepsDone) {
            statusJson["totalStepTime"] = totalStepTime.load();
            statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
            statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
            statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
        }

        {
            auto machines_json = json::object();
            auto machines_(machines.lock());
            for (auto & i : *machines_) {
                auto & m(i.second);
                auto & s(m->state);
                auto info(m->state->connectInfo.lock());

                json machine = {
                    {"enabled", m->enabled},
                    {"systemTypes", m->systemTypes},
                    {"supportedFeatures", m->supportedFeatures},
                    {"mandatoryFeatures", m->mandatoryFeatures},
                    {"nrStepsDone", s->nrStepsDone.load()},
                    {"currentJobs", s->currentJobs.load()},
                    {"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)},
                    {"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)},
                    {"consecutiveFailures", info->consecutiveFailures},
                };

                if (s->currentJobs == 0)
                    machine["idleSince"] = s->idleSince.load();
                if (m->state->nrStepsDone) {
                    machine["totalStepTime"] = s->totalStepTime.load();
                    machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
                    machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
                    machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
                }
                machines_json[m->storeUri.render()] = machine;
            }
            statusJson["machines"] = machines_json;
        }

        {
            auto jobsets_json = json::object();
            auto jobsets_(jobsets.lock());
            for (auto & jobset : *jobsets_) {
                jobsets_json[jobset.first.first + ":" + jobset.first.second] = {
                    {"shareUsed", jobset.second->shareUsed()},
                    {"seconds", jobset.second->getSeconds()},
                };
            }
            statusJson["jobsets"] = jobsets_json;
        }

        {
            auto machineTypesJson = json::object();
            auto machineTypes_(machineTypes.lock());
            for (auto & i : *machineTypes_) {
                auto machineTypeJson = machineTypesJson[i.first] = {
                    {"runnable", i.second.runnable},
                    {"running", i.second.running},
                };
                if (i.second.runnable > 0)
                    machineTypeJson["waitTime"] = i.second.waitTime.count() +
                        i.second.runnable * (time(0) - lastDispatcherCheck);
                if (i.second.running == 0)
                    machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
            }
            statusJson["machineTypes"] = machineTypesJson;
        }

        auto store = getDestStore();

        auto & stats = store->getStats();
        statusJson["store"] = {
            {"narInfoRead", stats.narInfoRead.load()},
            {"narInfoReadAverted", stats.narInfoReadAverted.load()},
            {"narInfoMissing", stats.narInfoMissing.load()},
            {"narInfoWrite", stats.narInfoWrite.load()},
            {"narInfoCacheSize", stats.pathInfoCacheSize.load()},
            {"narRead", stats.narRead.load()},
            {"narReadBytes", stats.narReadBytes.load()},
            {"narReadCompressedBytes", stats.narReadCompressedBytes.load()},
            {"narWrite", stats.narWrite.load()},
            {"narWriteAverted", stats.narWriteAverted.load()},
            {"narWriteBytes", stats.narWriteBytes.load()},
            {"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()},
            {"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
            {"narCompressionSavings",
             stats.narWriteBytes
             ? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
             : 0.0},
            {"narCompressionSpeed", // MiB/s
             stats.narWriteCompressionTimeMs
             ? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
             : 0.0},
        };

#if NIX_WITH_S3_SUPPORT
        auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
        if (s3Store) {
            auto & s3Stats = s3Store->getS3Stats();
            auto jsonS3 = statusJson["s3"] = {
                {"put", s3Stats.put.load()},
                {"putBytes", s3Stats.putBytes.load()},
                {"putTimeMs", s3Stats.putTimeMs.load()},
                {"putSpeed",
                 s3Stats.putTimeMs
                 ? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
                 : 0.0},
                {"get", s3Stats.get.load()},
                {"getBytes", s3Stats.getBytes.load()},
                {"getTimeMs", s3Stats.getTimeMs.load()},
                {"getSpeed",
                 s3Stats.getTimeMs
                 ? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
                 : 0.0},
                {"head", s3Stats.head.load()},
                {"costDollarApprox",
                 (s3Stats.get + s3Stats.head) / 10000.0 * 0.004
                 + s3Stats.put / 1000.0 * 0.005 +
                 + s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
            };
        }
#endif
    }

    {
        auto mc = startDbUpdate();
        pqxx::work txn(conn);
        // FIXME: use PostgreSQL 9.5 upsert.
        txn.exec("delete from SystemStatus where what = 'queue-runner'").no_rows();
        txn.exec("insert into SystemStatus values ('queue-runner', $1)", pqxx::params{statusJson.dump()}).no_rows();
        txn.exec("notify status_dumped");
        txn.commit();
    }
}

void State::showStatus()
{
    auto conn(dbPool.get());
    receiver statusDumped(*conn, "status_dumped");

    std::string status;
    bool barf = false;

    /* Get the last JSON status dump from the database. */
    {
        pqxx::work txn(*conn);
        auto res = txn.exec("select status from SystemStatus where what = 'queue-runner'");
        if (res.size()) status = res[0][0].as<std::string>();
    }

    if (status != "") {

        /* If the status is not empty, then the queue runner is
           running. Ask it to update the status dump. */
        {
            pqxx::work txn(*conn);
            txn.exec("notify dump_status");
            txn.commit();
        }

        /* Wait until it has done so. */
        barf = conn->await_notification(5, 0) == 0;

        /* Get the new status. */
        {
            pqxx::work txn(*conn);
            auto res = txn.exec("select status from SystemStatus where what = 'queue-runner'");
            if (res.size()) status = res[0][0].as<std::string>();
        }

    }

    if (status == "") status = R"({"status":"down"})";

    std::cout << status << "\n";

    if (barf)
        throw Error("queue runner did not respond; status information may be wrong");
}
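
`showStatus` and `dumpStatus` form a request/response handshake over PostgreSQL LISTEN/NOTIFY: the client notifies `dump_status`, the runner refreshes `SystemStatus` and notifies `status_dumped`. A stripped-down sketch of the same round trip in plain libpqxx, without Hydra's `receiver` wrapper from db.hh (channel and table names as above; exact notification-delivery semantics vary by pqxx version):

    // Sketch: ask the queue runner for a fresh status dump and wait for it.
    #include <pqxx/pqxx>
    #include <iostream>

    int main()
    {
        pqxx::connection conn; // connection parameters from PG* environment variables
        {
            pqxx::nontransaction txn(conn);
            txn.exec("listen status_dumped");
            txn.exec("notify dump_status");
        }
        // Wait up to 5 seconds for the runner to answer.
        if (conn.await_notification(5, 0) == 0) {
            std::cerr << "queue runner did not respond\n";
            return 1;
        }
        pqxx::nontransaction txn(conn);
        auto res = txn.exec("select status from SystemStatus where what = 'queue-runner'");
        if (!res.empty()) std::cout << res[0][0].as<std::string>() << "\n";
    }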

void State::unlock()
{
    auto lock = acquireGlobalLock();
    if (!lock)
        throw Error("hydra-queue-runner is currently running");

    auto conn(dbPool.get());

    clearBusy(*conn, 0);

    {
        pqxx::work txn(*conn);
        txn.exec("delete from SystemStatus where what = 'queue-runner'").no_rows();
        txn.commit();
    }
}

void State::run(BuildID buildOne)
{
    /* Can't be bothered to shut down cleanly. Goodbye! */
    auto callback = createInterruptCallback([&]() { std::_Exit(0); });

    startedAt = time(0);
    this->buildOne = buildOne;

    auto lock = acquireGlobalLock();
    if (!lock)
        throw Error("hydra-queue-runner is already running");

    std::cout << "Starting the Prometheus exporter on " << metricsAddr << std::endl;

    /* Set up simple exporter, to show that we're still alive. */
    prometheus::Exposer promExposer{metricsAddr};
    auto exposerPort = promExposer.GetListeningPorts().front();

    promExposer.RegisterCollectable(prom.registry);

    std::cout << "Started the Prometheus exporter, listening on "
              << metricsAddr << "/metrics (port " << exposerPort << ")"
              << std::endl;

    Store::Config::Params localParams;
    localParams["max-connections"] = "16";
    localParams["max-connection-age"] = "600";
    localStore = openStore(getEnv("NIX_REMOTE").value_or(""), localParams);

    auto storeUri = config->getStrOption("store_uri");
    _destStore = storeUri == "" ? localStore : openStore(storeUri);

    useSubstitutes = config->getBoolOption("use-substitutes", false);

    // FIXME: hacky mechanism for configuring determinism checks.
    for (auto & s : tokenizeString<Strings>(config->getStrOption("xxx-jobset-repeats"))) {
        auto s2 = tokenizeString<std::vector<std::string>>(s, ":");
        if (s2.size() != 3) throw Error("bad value in xxx-jobset-repeats");
        jobsetRepeats.emplace(std::make_pair(s2[0], s2[1]), std::stoi(s2[2]));
    }

    {
        auto conn(dbPool.get());
        clearBusy(*conn, 0);
        dumpStatus(*conn);
    }

    machinesReadyLock.lock();
    std::thread(&State::monitorMachinesFile, this).detach();

    std::thread(&State::queueMonitor, this).detach();

    std::thread(&State::dispatcher, this).detach();

    /* Periodically clean up orphaned busy steps in the database. */
    std::thread([&]() {
        while (true) {
            sleep(180);

            std::set<std::pair<BuildID, int>> steps;
            {
                auto orphanedSteps_(orphanedSteps.lock());
                if (orphanedSteps_->empty()) continue;
                steps = *orphanedSteps_;
                orphanedSteps_->clear();
            }

            try {
                auto conn(dbPool.get());
                pqxx::work txn(*conn);
                for (auto & step : steps) {
                    printMsg(lvlError, "cleaning orphaned step %d of build %d", step.second, step.first);
                    txn.exec("update BuildSteps set busy = 0, status = $1 where build = $2 and stepnr = $3 and busy != 0",
                        pqxx::params{(int) bsAborted,
                            step.first,
                            step.second}).no_rows();
                }
                txn.commit();
            } catch (std::exception & e) {
                printMsg(lvlError, "cleanup thread: %s", e.what());
                auto orphanedSteps_(orphanedSteps.lock());
                orphanedSteps_->insert(steps.begin(), steps.end());
            }
        }
    }).detach();

    /* Make sure that old daemon connections are closed even when
       we're not doing much. */
    std::thread([&]() {
        while (true) {
            sleep(10);
            try {
                if (auto remoteStore = getDestStore().dynamic_pointer_cast<RemoteStore>())
                    remoteStore->flushBadConnections();
            } catch (std::exception & e) {
                printMsg(lvlError, "connection flush thread: %s", e.what());
            }
        }
    }).detach();

    /* Monitor the database for status dump requests (e.g. from
       ‘hydra-queue-runner --status’). */
    while (true) {
        try {
            auto conn(dbPool.get());
            try {
                receiver dumpStatus_(*conn, "dump_status");
                while (true) {
                    conn->await_notification();
                    dumpStatus(*conn);
                }
            } catch (pqxx::broken_connection & connEx) {
                printMsg(lvlError, "main thread: %s", connEx.what());
                printMsg(lvlError, "main thread: Reconnecting in 10s");
                conn.markBad();
                sleep(10);
            }
        } catch (std::exception & e) {
            printMsg(lvlError, "main thread: %s", e.what());
            sleep(10); // probably a DB problem, so don't retry right away
        }
    }
}

int main(int argc, char * * argv)
{
    return handleExceptions(argv[0], [&]() {
        initNix();

        signal(SIGINT, SIG_DFL);
        signal(SIGTERM, SIG_DFL);
        signal(SIGHUP, SIG_DFL);

        // FIXME: do this in the child environment in openConnection().
        unsetenv("IN_SYSTEMD");

        bool unlock = false;
        bool status = false;
        BuildID buildOne = 0;
        std::optional<std::string> metricsAddrOpt = std::nullopt;

        parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
            if (*arg == "--unlock")
                unlock = true;
            else if (*arg == "--status")
                status = true;
            else if (*arg == "--build-one") {
                if (auto b = string2Int<BuildID>(getArg(*arg, arg, end)))
                    buildOne = *b;
                else
                    throw Error("‘--build-one’ requires a build ID");
            } else if (*arg == "--prometheus-address") {
                metricsAddrOpt = getArg(*arg, arg, end);
            } else
                return false;
            return true;
        });

        settings.verboseBuild = true;

        State state{metricsAddrOpt};
        if (status)
            state.showStatus();
        else if (unlock)
            state.unlock();
        else
            state.run(buildOne);
    });
}

src/hydra-queue-runner/meson.build
@@ -1,24 +0,0 @@
srcs = files(
  'builder.cc',
  'build-remote.cc',
  'build-result.cc',
  'dispatcher.cc',
  'hydra-queue-runner.cc',
  'nar-extractor.cc',
  'queue-monitor.cc',
)

hydra_queue_runner = executable('hydra-queue-runner',
  'hydra-queue-runner.cc',
  srcs,
  dependencies: [
    libhydra_dep,
    nix_util_dep,
    nix_store_dep,
    nix_main_dep,
    pqxx_dep,
    prom_cpp_core_dep,
    prom_cpp_pull_dep,
  ],
  install: true,
)

src/hydra-queue-runner/nar-extractor.cc
@@ -1,103 +0,0 @@
#include "nar-extractor.hh"

#include <nix/util/archive.hh>

#include <unordered_set>

using namespace nix;


struct NarMemberConstructor : CreateRegularFileSink
{
    NarMemberData & curMember;

    HashSink hashSink = HashSink { HashAlgorithm::SHA256 };

    std::optional<uint64_t> expectedSize;

    NarMemberConstructor(NarMemberData & curMember)
        : curMember(curMember)
    { }

    void isExecutable() override
    {
    }

    void preallocateContents(uint64_t size) override
    {
        expectedSize = size;
    }

    void operator () (std::string_view data) override
    {
        assert(expectedSize);
        *curMember.fileSize += data.size();
        hashSink(data);
        if (curMember.contents) {
            curMember.contents->append(data);
        }
        assert(curMember.fileSize <= expectedSize);
        if (curMember.fileSize == expectedSize) {
            auto [hash, len] = hashSink.finish();
            assert(curMember.fileSize == len);
            curMember.sha256 = hash;
        }
    }
};

struct Extractor : FileSystemObjectSink
{
    std::unordered_set<Path> filesToKeep {
        "/nix-support/hydra-build-products",
        "/nix-support/hydra-release-name",
        "/nix-support/hydra-metrics",
    };

    NarMemberDatas & members;
    std::filesystem::path prefix;

    Path toKey(const CanonPath & path)
    {
        std::filesystem::path p = prefix;
        // Conditional to avoid trailing slash
        if (!path.isRoot()) p /= path.rel();
        return p;
    }

    Extractor(NarMemberDatas & members, const Path & prefix)
        : members(members), prefix(prefix)
    { }

    void createDirectory(const CanonPath & path) override
    {
        members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tDirectory });
    }

    void createRegularFile(const CanonPath & path, std::function<void(CreateRegularFileSink &)> func) override
    {
        NarMemberConstructor nmc {
            members.insert_or_assign(toKey(path), NarMemberData {
                .type = SourceAccessor::Type::tRegular,
                .fileSize = 0,
                .contents = filesToKeep.count(path.abs()) ? std::optional("") : std::nullopt,
            }).first->second,
        };
        func(nmc);
    }

    void createSymlink(const CanonPath & path, const std::string & target) override
    {
        members.insert_or_assign(toKey(path), NarMemberData { .type = SourceAccessor::Type::tSymlink });
    }
};


void extractNarData(
    Source & source,
    const Path & prefix,
    NarMemberDatas & members)
{
    Extractor extractor(members, prefix);
    parseDump(extractor, source);
    // Note: this point may not be reached if we're in a coroutine.
}
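
`extractNarData` streams a NAR through the `Extractor` sink, recording the type, size, and SHA-256 of every member and retaining full contents only for the three `/nix-support` files. A rough usage sketch, not from the source, assuming a NAR file on disk and nix's `FdSource` from serialise.hh:

    // Sketch: index the members of a NAR file under the prefix "/output".
    #include <nix/util/serialise.hh>
    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>

    void dumpNarIndex(const char * narFile)
    {
        int fd = open(narFile, O_RDONLY);
        if (fd < 0) return; // error handling elided
        {
            nix::FdSource source(fd);
            NarMemberDatas members;
            extractNarData(source, "/output", members);
            for (auto & [path, data] : members)
                std::printf("%s size=%llu\n", path.c_str(),
                    (unsigned long long) data.fileSize.value_or(0));
        }
        close(fd);
    }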

src/hydra-queue-runner/nar-extractor.hh
@@ -1,23 +0,0 @@
#pragma once

#include <nix/util/source-accessor.hh>
#include <nix/util/types.hh>
#include <nix/util/serialise.hh>
#include <nix/util/hash.hh>

struct NarMemberData
{
    nix::SourceAccessor::Type type;
    std::optional<uint64_t> fileSize;
    std::optional<std::string> contents;
    std::optional<nix::Hash> sha256;
};

typedef std::map<nix::Path, NarMemberData> NarMemberDatas;

/* Read a NAR from a source and gather some info about every file
   inside the NAR. */
void extractNarData(
    nix::Source & source,
    const nix::Path & prefix,
    NarMemberDatas & members);

src/hydra-queue-runner/queue-monitor.cc
@@ -1,758 +0,0 @@
#include "state.hh"
#include "hydra-build-result.hh"
#include <nix/store/globals.hh>
#include <nix/store/parsed-derivations.hh>
#include <nix/util/thread-pool.hh>

#include <cstring>
#include <signal.h>

using namespace nix;


void State::queueMonitor()
{
    while (true) {
        auto conn(dbPool.get());
        try {
            queueMonitorLoop(*conn);
        } catch (pqxx::broken_connection & e) {
            printMsg(lvlError, "queue monitor: %s", e.what());
            printMsg(lvlError, "queue monitor: Reconnecting in 10s");
            conn.markBad();
            sleep(10);
        } catch (std::exception & e) {
            printError("queue monitor: %s", e.what());
            sleep(10); // probably a DB problem, so don't retry right away
        }
    }
}


void State::queueMonitorLoop(Connection & conn)
{
    receiver buildsAdded(conn, "builds_added");
    receiver buildsRestarted(conn, "builds_restarted");
    receiver buildsCancelled(conn, "builds_cancelled");
    receiver buildsDeleted(conn, "builds_deleted");
    receiver buildsBumped(conn, "builds_bumped");
    receiver jobsetSharesChanged(conn, "jobset_shares_changed");

    auto destStore = getDestStore();

    bool quit = false;
    while (!quit) {
        auto t_before_work = std::chrono::steady_clock::now();

        localStore->clearPathInfoCache();

        bool done = getQueuedBuilds(conn, destStore);

        if (buildOne && buildOneDone) quit = true;

        auto t_after_work = std::chrono::steady_clock::now();

        prom.queue_monitor_time_spent_running.Increment(
            std::chrono::duration_cast<std::chrono::microseconds>(t_after_work - t_before_work).count());

        /* Sleep until we get notification from the database about an
           event. */
        if (done && !quit) {
            conn.await_notification();
            nrQueueWakeups++;
        } else
            conn.get_notifs();

        if (auto lowestId = buildsAdded.get()) {
            printMsg(lvlTalkative, "got notification: new builds added to the queue");
        }
        if (buildsRestarted.get()) {
            printMsg(lvlTalkative, "got notification: builds restarted");
        }
        if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
            printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
            processQueueChange(conn);
        }
        if (jobsetSharesChanged.get()) {
            printMsg(lvlTalkative, "got notification: jobset shares changed");
            processJobsetSharesChange(conn);
        }

        auto t_after_sleep = std::chrono::steady_clock::now();
        prom.queue_monitor_time_spent_waiting.Increment(
            std::chrono::duration_cast<std::chrono::microseconds>(t_after_sleep - t_after_work).count());
    }

    exit(0);
}


struct PreviousFailure : public std::exception {
    Step::ptr step;
    PreviousFailure(Step::ptr step) : step(step) { }
};


bool State::getQueuedBuilds(Connection & conn,
    ref<Store> destStore)
{
    prom.queue_checks_started.Increment();

    printInfo("checking the queue for builds...");

    /* Grab the queued builds from the database, but don't process
       them yet (since we don't want a long-running transaction). */
    std::vector<BuildID> newIDs;
    std::unordered_map<BuildID, Build::ptr> newBuildsByID;
    std::multimap<StorePath, BuildID> newBuildsByPath;

    {
        pqxx::work txn(conn);

        auto res = txn.exec("select builds.id, builds.jobset_id, jobsets.project as project, "
            "jobsets.name as jobset, job, drvPath, maxsilent, timeout, timestamp, "
            "globalPriority, priority from Builds "
            "inner join jobsets on builds.jobset_id = jobsets.id "
            "where finished = 0 order by globalPriority desc, random()");

        for (auto const & row : res) {
            auto builds_(builds.lock());
            BuildID id = row["id"].as<BuildID>();
            if (buildOne && id != buildOne) continue;
            if (builds_->count(id)) continue;

            auto build = std::make_shared<Build>(
                localStore->parseStorePath(row["drvPath"].as<std::string>()));
            build->id = id;
            build->jobsetId = row["jobset_id"].as<JobsetID>();
            build->projectName = row["project"].as<std::string>();
            build->jobsetName = row["jobset"].as<std::string>();
            build->jobName = row["job"].as<std::string>();
            build->maxSilentTime = row["maxsilent"].as<int>();
            build->buildTimeout = row["timeout"].as<int>();
            build->timestamp = row["timestamp"].as<time_t>();
            build->globalPriority = row["globalPriority"].as<int>();
            build->localPriority = row["priority"].as<int>();
            build->jobset = createJobset(txn, build->projectName, build->jobsetName, build->jobsetId);

            newIDs.push_back(id);
            newBuildsByID[id] = build;
            newBuildsByPath.emplace(std::make_pair(build->drvPath, id));
        }
    }

    std::set<Step::ptr> newRunnable;
    unsigned int nrAdded;
    std::function<void(Build::ptr)> createBuild;
    std::set<StorePath> finishedDrvs;

    createBuild = [&](Build::ptr build) {
        prom.queue_build_loads.Increment();
        printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
        nrAdded++;
        newBuildsByID.erase(build->id);

        if (!localStore->isValidPath(build->drvPath)) {
            /* Derivation has been GC'ed prematurely. */
            printError("aborting GC'ed build %1%", build->id);
            if (!build->finishedInDB) {
                auto mc = startDbUpdate();
                pqxx::work txn(conn);
                txn.exec("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3 where id = $1 and finished = 0",
                    pqxx::params{build->id,
                        (int) bsAborted,
                        time(0)}).no_rows();
                txn.commit();
                build->finishedInDB = true;
                nrBuildsDone++;
            }
            return;
        }

        std::set<Step::ptr> newSteps;
        Step::ptr step;

        /* Create steps for this derivation and its dependencies. */
        try {
            step = createStep(destStore, conn, build, build->drvPath,
                build, 0, finishedDrvs, newSteps, newRunnable);
        } catch (PreviousFailure & ex) {

            /* Some step previously failed, so mark the build as
               failed right away. */
            if (!buildOneDone && build->id == buildOne) buildOneDone = true;
            printMsg(lvlError, "marking build %d as cached failure due to ‘%s’",
                build->id, localStore->printStorePath(ex.step->drvPath));
            if (!build->finishedInDB) {
                auto mc = startDbUpdate();
                pqxx::work txn(conn);

                /* Find the previous build step record, first by
                   derivation path, then by output path. */
                BuildID propagatedFrom = 0;

                auto res = txn.exec("select max(build) from BuildSteps where drvPath = $1 and startTime != 0 and stopTime != 0 and status = 1",
                    pqxx::params{localStore->printStorePath(ex.step->drvPath)}).one_row();
                if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();

                if (!propagatedFrom) {
                    for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(ex.step->drvPath, &*localStore)) {
                        constexpr std::string_view common = "select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where startTime != 0 and stopTime != 0 and status = 1";
                        auto res = optOutputPath
                            ? txn.exec(
                                std::string { common } + " and path = $1",
                                pqxx::params{localStore->printStorePath(*optOutputPath)})
                            : txn.exec(
                                std::string { common } + " and drvPath = $1 and name = $2",
                                pqxx::params{localStore->printStorePath(ex.step->drvPath), outputName});
                        if (!res[0][0].is_null()) {
                            propagatedFrom = res[0][0].as<BuildID>();
                            break;
                        }
                    }
                }

                createBuildStep(txn, 0, build->id, ex.step, "", bsCachedFailure, "", propagatedFrom);
                txn.exec("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $3, isCachedBuild = 1, notificationPendingSince = $3 "
                    "where id = $1 and finished = 0",
                    pqxx::params{build->id,
                        (int) (ex.step->drvPath == build->drvPath ? bsFailed : bsDepFailed),
                        time(0)}).no_rows();
                notifyBuildFinished(txn, build->id, {});
                txn.commit();
                build->finishedInDB = true;
                nrBuildsDone++;
            }

            return;
        }

        /* Some of the new steps may be the top level of builds that
           we haven't processed yet. So do them now. This ensures that
           if build A depends on build B with top-level step X, then X
           will be "accounted" to B in doBuildStep(). */
        for (auto & r : newSteps) {
            auto i = newBuildsByPath.find(r->drvPath);
            if (i == newBuildsByPath.end()) continue;
            auto j = newBuildsByID.find(i->second);
            if (j == newBuildsByID.end()) continue;
            createBuild(j->second);
        }

        /* If we didn't get a step, it means the step's outputs are
           all valid. So we mark this as a finished, cached build. */
        if (!step) {
            BuildOutput res = getBuildOutputCached(conn, destStore, build->drvPath);

            for (auto & i : destStore->queryDerivationOutputMap(build->drvPath, &*localStore))
                addRoot(i.second);

            {
                auto mc = startDbUpdate();
                pqxx::work txn(conn);
                time_t now = time(0);
                if (!buildOneDone && build->id == buildOne) buildOneDone = true;
                printMsg(lvlInfo, "marking build %1% as succeeded (cached)", build->id);
                markSucceededBuild(txn, build, res, true, now, now);
                notifyBuildFinished(txn, build->id, {});
                txn.commit();
            }

            build->finishedInDB = true;

            return;
        }

        /* Note: if we exit this scope prior to this, the build and
           all newly created steps are destroyed. */

        {
            auto builds_(builds.lock());
            if (!build->finishedInDB) // FIXME: can this happen?
                (*builds_)[build->id] = build;
            build->toplevel = step;
        }

        build->propagatePriorities();

        printMsg(lvlChatty, "added build %1% (top-level step %2%, %3% new steps)",
            build->id, localStore->printStorePath(step->drvPath), newSteps.size());
    };

    /* Now instantiate build steps for each new build. The builder
       threads can start building the runnable build steps right away,
       even while we're still processing other new builds. */
    system_time start = std::chrono::system_clock::now();

    for (auto id : newIDs) {
        auto i = newBuildsByID.find(id);
        if (i == newBuildsByID.end()) continue;
        auto build = i->second;

        auto now1 = std::chrono::steady_clock::now();

        newRunnable.clear();
        nrAdded = 0;
        try {
            createBuild(build);
        } catch (Error & e) {
            e.addTrace({}, HintFmt("while loading build %d: ", build->id));
            throw;
        }

        auto now2 = std::chrono::steady_clock::now();

        buildReadTimeMs += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();

        /* Add the new runnable build steps to ‘runnable’ and wake up
           the builder threads. */
        printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
        for (auto & r : newRunnable)
            makeRunnable(r);

        if (buildOne && newRunnable.size() == 0) buildOneDone = true;

        nrBuildsRead += nrAdded;

        /* Stop after a certain time to allow priority bumps to be
           processed. */
        if (std::chrono::system_clock::now() > start + std::chrono::seconds(60)) {
            prom.queue_checks_early_exits.Increment();
            break;
        }
    }

    prom.queue_checks_finished.Increment();
    return newBuildsByID.empty();
}


void Build::propagatePriorities()
{
    /* Update the highest global priority and lowest build ID fields
       of each dependency. This is used by the dispatcher to start
       steps in order of descending global priority and ascending
       build ID. */
    visitDependencies([&](const Step::ptr & step) {
        auto step_(step->state.lock());
        step_->highestGlobalPriority = std::max(step_->highestGlobalPriority, globalPriority);
        step_->highestLocalPriority = std::max(step_->highestLocalPriority, localPriority);
        step_->lowestBuildID = std::min(step_->lowestBuildID, id);
        step_->jobsets.insert(jobset);
    }, toplevel);
}


void State::processQueueChange(Connection & conn)
{
    /* Get the current set of queued builds. */
    std::map<BuildID, int> currentIds;
    {
        pqxx::work txn(conn);
        auto res = txn.exec("select id, globalPriority from Builds where finished = 0");
        for (auto const & row : res)
            currentIds[row["id"].as<BuildID>()] = row["globalPriority"].as<BuildID>();
    }

    {
        auto builds_(builds.lock());

        for (auto i = builds_->begin(); i != builds_->end(); ) {
            auto b = currentIds.find(i->first);
            if (b == currentIds.end()) {
                printInfo("discarding cancelled build %1%", i->first);
                i = builds_->erase(i);
                // FIXME: ideally we would interrupt active build steps here.
                continue;
            }
            if (i->second->globalPriority < b->second) {
                printInfo("priority of build %1% increased", i->first);
                i->second->globalPriority = b->second;
                i->second->propagatePriorities();
            }
            ++i;
        }
    }

    {
        auto activeSteps(activeSteps_.lock());
        for (auto & activeStep : *activeSteps) {
            std::set<Build::ptr> dependents;
            std::set<Step::ptr> steps;
            getDependents(activeStep->step, dependents, steps);
            if (!dependents.empty()) continue;

            {
                auto activeStepState(activeStep->state_.lock());
                if (activeStepState->cancelled) continue;
                activeStepState->cancelled = true;
                if (activeStepState->pid != -1) {
                    printInfo("killing builder process %d of build step ‘%s’",
                        activeStepState->pid,
                        localStore->printStorePath(activeStep->step->drvPath));
                    if (kill(activeStepState->pid, SIGINT) == -1)
                        printError("error killing build step ‘%s’: %s",
                            localStore->printStorePath(activeStep->step->drvPath),
                            strerror(errno));
                }
            }
        }
    }
}


std::map<DrvOutput, std::optional<StorePath>> State::getMissingRemotePaths(
    ref<Store> destStore,
    const std::map<DrvOutput, std::optional<StorePath>> & paths)
{
    Sync<std::map<DrvOutput, std::optional<StorePath>>> missing_;
    ThreadPool tp;

    for (auto & [output, maybeOutputPath] : paths) {
        if (!maybeOutputPath) {
            auto missing(missing_.lock());
            missing->insert({output, maybeOutputPath});
        } else {
            tp.enqueue([&] {
                if (!destStore->isValidPath(*maybeOutputPath)) {
                    auto missing(missing_.lock());
                    missing->insert({output, maybeOutputPath});
                }
            });
        }
    }

    tp.process();

    auto missing(missing_.lock());
    return *missing;
}
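
`getMissingRemotePaths` fans the `isValidPath` checks out over nix's `ThreadPool` and funnels results through a `Sync`-guarded map, so no explicit mutex appears in the code. The same `Sync` plus `ThreadPool` shape, reduced to a minimal sketch with an invented workload:

    // Sketch: collect results from parallel workers under a Sync lock.
    #include <nix/util/sync.hh>
    #include <nix/util/thread-pool.hh>
    #include <vector>

    std::vector<int> squaresInParallel(const std::vector<int> & xs)
    {
        nix::Sync<std::vector<int>> results_;
        nix::ThreadPool tp;
        for (int x : xs)
            tp.enqueue([&results_, x] {
                int y = x * x;                 // the "work"
                auto results(results_.lock()); // RAII lock over the vector
                results->push_back(y);
            });
        tp.process(); // run all queued work items, then return
        return *results_.lock();
    }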

Step::ptr State::createStep(ref<Store> destStore,
    Connection & conn, Build::ptr build, const StorePath & drvPath,
    Build::ptr referringBuild, Step::ptr referringStep, std::set<StorePath> & finishedDrvs,
    std::set<Step::ptr> & newSteps, std::set<Step::ptr> & newRunnable)
{
    if (finishedDrvs.find(drvPath) != finishedDrvs.end()) return 0;

    /* Check if the requested step already exists. If not, create a
       new step. In any case, make the step reachable from
       referringBuild or referringStep. This is done atomically (with
       ‘steps’ locked), to ensure that this step can never become
       reachable from a new build after doBuildStep has removed it
       from ‘steps’. */
    Step::ptr step;
    bool isNew = false;
    {
        auto steps_(steps.lock());

        /* See if the step already exists in ‘steps’ and is not
           stale. */
        auto prev = steps_->find(drvPath);
        if (prev != steps_->end()) {
            step = prev->second.lock();
            /* Since ‘step’ is a strong pointer, the referred Step
               object won't be deleted after this. */
            if (!step) steps_->erase(drvPath); // remove stale entry
        }

        /* If it doesn't exist, create it. */
        if (!step) {
            step = std::make_shared<Step>(drvPath);
            isNew = true;
        }

        auto step_(step->state.lock());

        assert(step_->created != isNew);

        if (referringBuild)
            step_->builds.push_back(referringBuild);

        if (referringStep)
            step_->rdeps.push_back(referringStep);

        steps_->insert_or_assign(drvPath, step);
    }

    if (!isNew) return step;

    prom.queue_steps_created.Increment();

    printMsg(lvlDebug, "considering derivation ‘%1%’", localStore->printStorePath(drvPath));

    /* Initialize the step. Note that the step may be visible in
       ‘steps’ before this point, but that doesn't matter because
       it's not runnable yet, and other threads won't make it
       runnable while step->created == false. */
    step->drv = std::make_unique<Derivation>(localStore->readDerivation(drvPath));
    {
        try {
            step->drvOptions = std::make_unique<DerivationOptions>(
                DerivationOptions::fromStructuredAttrs(
                    step->drv->env,
                    step->drv->structuredAttrs ? &*step->drv->structuredAttrs : nullptr));
        } catch (Error & e) {
            e.addTrace({}, "while parsing derivation '%s'", localStore->printStorePath(drvPath));
            throw;
        }
    }

    step->preferLocalBuild = step->drvOptions->willBuildLocally(*localStore, *step->drv);
    step->isDeterministic = getOr(step->drv->env, "isDetermistic", "0") == "1";

    step->systemType = step->drv->platform;
    {
        StringSet features = step->requiredSystemFeatures = step->drvOptions->getRequiredSystemFeatures(*step->drv);
        if (step->preferLocalBuild)
            features.insert("local");
        if (!features.empty()) {
            step->systemType += ":";
            step->systemType += concatStringsSep(",", features);
        }
    }

    /* If this derivation failed previously, give up. */
    if (checkCachedFailure(step, conn))
        throw PreviousFailure{step};

    /* Are all outputs valid? */
    auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
    std::map<DrvOutput, std::optional<StorePath>> paths;
    for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
        auto outputHash = outputHashes.at(outputName);
        paths.insert({{outputHash, outputName}, maybeOutputPath});
    }

    auto missing = getMissingRemotePaths(destStore, paths);
    bool valid = missing.empty();

    /* Try to copy the missing paths from the local store or from
       substitutes. */
    if (!missing.empty()) {

        size_t avail = 0;
        for (auto & [i, pathOpt] : missing) {
            // If we don't know the output path from the destination
            // store, see if the local store can tell us.
            if (/* localStore != destStore && */ !pathOpt && experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
                if (auto maybeRealisation = localStore->queryRealisation(i))
                    pathOpt = maybeRealisation->outPath;

            if (!pathOpt) {
                // No hope of getting the store object if we don't know
                // the path.
                continue;
            }
            auto & path = *pathOpt;

            if (/* localStore != destStore && */ localStore->isValidPath(path))
                avail++;
            else if (useSubstitutes) {
                SubstitutablePathInfos infos;
                localStore->querySubstitutablePathInfos({{path, {}}}, infos);
                if (infos.size() == 1)
                    avail++;
            }
        }

        if (missing.size() == avail) {
            valid = true;
            for (auto & [i, pathOpt] : missing) {
                // If we found everything, then we should know the path
                // to every missing store object now.
                assert(pathOpt);
                auto & path = *pathOpt;

                try {
                    time_t startTime = time(0);

                    if (localStore->isValidPath(path))
                        printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
                            localStore->printStorePath(path),
                            localStore->printStorePath(drvPath));
                    else {
                        printInfo("substituting output ‘%1%’ of ‘%2%’",
                            localStore->printStorePath(path),
                            localStore->printStorePath(drvPath));
                        localStore->ensurePath(path);
                        // FIXME: should copy directly from substituter to destStore.
                    }

                    copyClosure(*localStore, *destStore,
                        StorePathSet { path },
                        NoRepair, CheckSigs, NoSubstitute);

                    time_t stopTime = time(0);

                    {
                        auto mc = startDbUpdate();
                        pqxx::work txn(conn);
                        createSubstitutionStep(txn, startTime, stopTime, build, drvPath, *(step->drv), "out", path);
                        txn.commit();
                    }

                } catch (Error & e) {
                    printError("while copying/substituting output ‘%s’ of ‘%s’: %s",
                        localStore->printStorePath(path),
                        localStore->printStorePath(drvPath),
                        e.what());
                    valid = false;
                    break;
                }
            }
        }
    }

    // FIXME: check whether all outputs are in the binary cache.
    if (valid) {
        finishedDrvs.insert(drvPath);
        return 0;
    }

    /* No, we need to build. */
    printMsg(lvlDebug, "creating build step ‘%1%’", localStore->printStorePath(drvPath));

    /* Create steps for the dependencies. */
    for (auto & i : step->drv->inputDrvs.map) {
        auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
        if (dep) {
            auto step_(step->state.lock());
            step_->deps.insert(dep);
        }
    }

    /* If the step has no (remaining) dependencies, make it
       runnable. */
    {
        auto step_(step->state.lock());
        assert(!step_->created);
        step_->created = true;
        if (step_->deps.empty())
            newRunnable.insert(step);
    }

    newSteps.insert(step);

    return step;
}


Jobset::ptr State::createJobset(pqxx::work & txn,
    const std::string & projectName, const std::string & jobsetName, const JobsetID jobsetID)
{
    auto p = std::make_pair(projectName, jobsetName);

    {
        auto jobsets_(jobsets.lock());
        auto i = jobsets_->find(p);
        if (i != jobsets_->end()) return i->second;
    }

    auto res = txn.exec("select schedulingShares from Jobsets where id = $1",
        pqxx::params{jobsetID}).one_row();

    auto shares = res["schedulingShares"].as<unsigned int>();

    auto jobset = std::make_shared<Jobset>();
    jobset->setShares(shares);

    /* Load the build steps from the last 24 hours. */
    auto res2 = txn.exec("select s.startTime, s.stopTime from BuildSteps s join Builds b on build = id "
        "where s.startTime is not null and s.stopTime > $1 and jobset_id = $2",
        pqxx::params{time(0) - Jobset::schedulingWindow * 10,
            jobsetID});
    for (auto const & row : res2) {
        time_t startTime = row["startTime"].as<time_t>();
        time_t stopTime = row["stopTime"].as<time_t>();
        jobset->addStep(startTime, stopTime - startTime);
    }

    auto jobsets_(jobsets.lock());
    // Can't happen because only this thread adds to "jobsets".
    assert(jobsets_->find(p) == jobsets_->end());
    (*jobsets_)[p] = jobset;
    return jobset;
}


void State::processJobsetSharesChange(Connection & conn)
{
    /* Get the current set of jobsets. */
    pqxx::work txn(conn);
    auto res = txn.exec("select project, name, schedulingShares from Jobsets");
    for (auto const & row : res) {
        auto jobsets_(jobsets.lock());
        auto i = jobsets_->find(std::make_pair(row["project"].as<std::string>(), row["name"].as<std::string>()));
        if (i == jobsets_->end()) continue;
        i->second->setShares(row["schedulingShares"].as<unsigned int>());
    }
}


BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::StorePath & drvPath)
{
    auto derivationOutputs = destStore->queryDerivationOutputMap(drvPath, &*localStore);

    {
        pqxx::work txn(conn);

        for (auto & [name, output] : derivationOutputs) {
            auto r = txn.exec("select id, buildStatus, releaseName, closureSize, size from Builds b "
                "join BuildOutputs o on b.id = o.build "
                "where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
                pqxx::params{localStore->printStorePath(output)});
            if (r.empty()) continue;
            BuildID id = r[0][0].as<BuildID>();

            printInfo("reusing build %d", id);

            BuildOutput res;
            res.failed = r[0][1].as<int>() == bsFailedWithOutput;
            res.releaseName = r[0][2].is_null() ? "" : r[0][2].as<std::string>();
            res.closureSize = r[0][3].is_null() ? 0 : r[0][3].as<uint64_t>();
            res.size = r[0][4].is_null() ? 0 : r[0][4].as<uint64_t>();

            auto products = txn.exec("select type, subtype, fileSize, sha256hash, path, name, defaultPath from BuildProducts where build = $1 order by productnr",
                pqxx::params{id});

            for (auto row : products) {
                BuildProduct product;
                product.type = row[0].as<std::string>();
                product.subtype = row[1].as<std::string>();
                if (row[2].is_null())
                    product.isRegular = false;
                else {
                    product.isRegular = true;
                    product.fileSize = row[2].as<off_t>();
                }
                if (!row[3].is_null())
                    product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashAlgorithm::SHA256);
                if (!row[4].is_null())
                    product.path = row[4].as<std::string>();
                product.name = row[5].as<std::string>();
                if (!row[6].is_null())
                    product.defaultPath = row[6].as<std::string>();
                res.products.emplace_back(product);
            }

            auto metrics = txn.exec("select name, unit, value from BuildMetrics where build = $1",
                pqxx::params{id});

            for (auto row : metrics) {
                BuildMetric metric;
                metric.name = row[0].as<std::string>();
                metric.unit = row[1].is_null() ? "" : row[1].as<std::string>();
                metric.value = row[2].as<double>();
                res.metrics.emplace(metric.name, metric);
            }

            return res;
        }

    }

    NarMemberDatas narMembers;
    return getBuildOutput(destStore, narMembers, derivationOutputs);
}

src/hydra-queue-runner/state.hh
@@ -1,597 +0,0 @@
#pragma once

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <map>
#include <memory>
#include <queue>
#include <regex>
#include <semaphore>

#include <prometheus/counter.h>
#include <prometheus/gauge.h>
#include <prometheus/registry.h>

#include "db.hh"

#include <nix/store/derivations.hh>
#include <nix/store/derivation-options.hh>
#include <nix/store/pathlocks.hh>
#include <nix/util/pool.hh>
#include <nix/store/build-result.hh>
#include <nix/store/store-api.hh>
#include <nix/util/sync.hh>
#include "nar-extractor.hh"
#include <nix/store/serve-protocol.hh>
#include <nix/store/serve-protocol-impl.hh>
#include <nix/store/serve-protocol-connection.hh>
#include <nix/store/machines.hh>
#include <nix/store/globals.hh>


typedef unsigned int BuildID;

typedef unsigned int JobsetID;

typedef std::chrono::time_point<std::chrono::system_clock> system_time;

typedef std::atomic<unsigned long> counter;


typedef enum {
    bsSuccess = 0,
    bsFailed = 1,
    bsDepFailed = 2, // builds only
    bsAborted = 3,
    bsCancelled = 4,
    bsFailedWithOutput = 6, // builds only
    bsTimedOut = 7,
    bsCachedFailure = 8, // steps only
    bsUnsupported = 9,
    bsLogLimitExceeded = 10,
    bsNarSizeLimitExceeded = 11,
    bsNotDeterministic = 12,
    bsBusy = 100, // not stored
} BuildStatus;


typedef enum {
    ssPreparing = 1,
    ssConnecting = 10,
    ssSendingInputs = 20,
    ssBuilding = 30,
    ssWaitingForLocalSlot = 35,
    ssReceivingOutputs = 40,
    ssPostProcessing = 50,
} StepState;


struct RemoteResult
{
    BuildStatus stepStatus = bsAborted;
    bool canRetry = false; // for bsAborted
    bool isCached = false; // for bsSucceed
    bool canCache = false; // for bsFailed
    std::string errorMsg; // for bsAborted

    unsigned int timesBuilt = 0;
    bool isNonDeterministic = false;

    time_t startTime = 0, stopTime = 0;
    unsigned int overhead = 0;
    nix::Path logFile;

    BuildStatus buildStatus() const
    {
        return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
    }

    void updateWithBuildResult(const nix::BuildResult &);
};


struct Step;
struct BuildOutput;


class Jobset
{
public:

    typedef std::shared_ptr<Jobset> ptr;
    typedef std::weak_ptr<Jobset> wptr;

    static const time_t schedulingWindow = 24 * 60 * 60;

private:

    std::atomic<time_t> seconds{0};
    std::atomic<unsigned int> shares{1};

    /* The start time and duration of the most recent build steps. */
    nix::Sync<std::map<time_t, time_t>> steps;

public:

    double shareUsed()
    {
        return (double) seconds / shares;
    }

    void setShares(int shares_)
    {
        assert(shares_ > 0);
        shares = shares_;
    }

    time_t getSeconds() { return seconds; }

    void addStep(time_t startTime, time_t duration);

    void pruneSteps();
};
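
`shareUsed()` divides the build-step seconds a jobset consumed inside the scheduling window by its configured share count, so the scheduler can favour the jobset with the lowest ratio. A toy illustration of that comparison, with invented numbers:

    // Sketch: between two jobsets, the lower seconds/shares ratio wins.
    #include <cstdio>

    int main()
    {
        double a = 3600.0 / 100; // jobset A: 1h used, 100 shares -> 36
        double b = 1800.0 / 10;  // jobset B: 0.5h used, 10 shares -> 180
        std::printf("schedule %s first\n", a < b ? "A" : "B"); // prints A
    }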
|
||||
|
||||
|
||||
struct Build
|
||||
{
|
||||
typedef std::shared_ptr<Build> ptr;
|
||||
typedef std::weak_ptr<Build> wptr;
|
||||
|
||||
BuildID id;
|
||||
nix::StorePath drvPath;
|
||||
std::map<std::string, nix::StorePath> outputs;
|
||||
JobsetID jobsetId;
|
||||
std::string projectName, jobsetName, jobName;
|
||||
time_t timestamp;
|
||||
unsigned int maxSilentTime, buildTimeout;
|
||||
int localPriority, globalPriority;
|
||||
|
||||
std::shared_ptr<Step> toplevel;
|
||||
|
||||
Jobset::ptr jobset;
|
||||
|
||||
std::atomic_bool finishedInDB{false};
|
||||
|
||||
Build(nix::StorePath && drvPath) : drvPath(std::move(drvPath))
|
||||
{ }
|
||||
|
||||
std::string fullJobName()
|
||||
{
|
||||
return projectName + ":" + jobsetName + ":" + jobName;
|
||||
}
|
||||
|
||||
void propagatePriorities();
|
||||
};
|
||||
|
||||
|
||||
struct Step
|
||||
{
|
||||
typedef std::shared_ptr<Step> ptr;
|
||||
typedef std::weak_ptr<Step> wptr;
|
||||
|
||||
nix::StorePath drvPath;
|
||||
std::unique_ptr<nix::Derivation> drv;
|
||||
std::unique_ptr<nix::DerivationOptions> drvOptions;
|
||||
nix::StringSet requiredSystemFeatures;
|
||||
bool preferLocalBuild;
|
||||
bool isDeterministic;
|
||||
std::string systemType; // concatenation of drv.platform and requiredSystemFeatures
|
||||
|
||||
struct State
|
||||
{
|
||||
/* Whether the step has finished initialisation. */
|
||||
bool created = false;
|
||||
|
||||
/* The build steps on which this step depends. */
|
||||
std::set<Step::ptr> deps;
|
||||
|
||||
/* The build steps that depend on this step. */
|
||||
std::vector<Step::wptr> rdeps;
|
||||
|
||||
/* Builds that have this step as the top-level derivation. */
|
||||
std::vector<Build::wptr> builds;
|
||||
|
||||
/* Jobsets to which this step belongs. Used for determining
|
||||
scheduling priority. */
|
||||
std::set<Jobset::ptr> jobsets;
|
||||
|
||||
/* Number of times we've tried this step. */
|
||||
unsigned int tries = 0;
|
||||
|
||||
/* Point in time after which the step can be retried. */
|
||||
system_time after;
|
||||
|
||||
/* The highest global priority of any build depending on this
|
||||
step. */
|
||||
int highestGlobalPriority{0};
|
||||
|
||||
/* The highest local priority of any build depending on this
|
||||
step. */
|
||||
int highestLocalPriority{0};
|
||||
|
||||
/* The lowest ID of any build depending on this step. */
|
||||
BuildID lowestBuildID{std::numeric_limits<BuildID>::max()};
|
||||
|
||||
/* The time at which this step became runnable. */
|
||||
system_time runnableSince;
|
||||
|
||||
/* The time that we last saw a machine that supports this
|
||||
step. */
|
||||
system_time lastSupported = std::chrono::system_clock::now();
|
||||
};
|
||||
|
||||
std::atomic_bool finished{false}; // debugging
|
||||
|
||||
nix::Sync<State> state;
|
||||
|
||||
Step(const nix::StorePath & drvPath) : drvPath(drvPath)
|
||||
{ }
|
||||
|
||||
~Step()
|
||||
{
|
||||
//printMsg(lvlError, format("destroying step %1%") % drvPath);
|
||||
}
|
||||
};

void getDependents(Step::ptr step, std::set<Build::ptr> & builds, std::set<Step::ptr> & steps);

/* Call ‘visitor’ for a step and all its dependencies. */
void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);


struct Machine : nix::Machine
{
    typedef std::shared_ptr<Machine> ptr;

    struct State {
        typedef std::shared_ptr<State> ptr;
        counter currentJobs{0};
        counter nrStepsDone{0};
        counter totalStepTime{0}; // total time for steps, including closure copying
        counter totalStepBuildTime{0}; // total build time for steps
        std::atomic<time_t> idleSince{0};

        struct ConnectInfo
        {
            system_time lastFailure, disabledUntil;
            unsigned int consecutiveFailures;
        };
        nix::Sync<ConnectInfo> connectInfo;

        /* Mutex to prevent multiple threads from sending data to the
           same machine (which would be inefficient). */
        std::timed_mutex sendLock;
    };

    State::ptr state;

    bool supportsStep(Step::ptr step)
    {
        /* Check that this machine is of the type required by the
           step. */
        if (!systemTypes.count(step->drv->platform == "builtin" ? nix::settings.thisSystem : step->drv->platform))
            return false;

        /* Check that the step requires all mandatory features of this
           machine. (Thus, a machine with the mandatory "benchmark"
           feature will *only* execute steps that require
           "benchmark".) The "preferLocalBuild" bit of a step is
           mapped to the "local" feature; thus machines that have
           "local" as a mandatory feature will only do
           preferLocalBuild steps. */
        for (auto & f : mandatoryFeatures)
            if (!step->requiredSystemFeatures.count(f)
                && !(f == "local" && step->preferLocalBuild))
                return false;

        /* Check that the machine supports all features required by
           the step. */
        for (auto & f : step->requiredSystemFeatures)
            if (!supportedFeatures.count(f)) return false;

        return true;
    }

    bool isLocalhost() const;

    // A connection to a machine
    struct Connection : nix::ServeProto::BasicClientConnection {
        // Backpointer to the machine
        ptr machine;
    };
};
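The two feature loops in supportsStep form a two-way contract: every mandatory feature of the machine must be wanted by the step, and every feature the step wants must be supported by the machine. A distilled, standalone sketch of that contract (plain std::set stand-ins; the "local"/preferLocalBuild special case is omitted, and all names are illustrative):

#include <set>
#include <string>

// Distilled sketch of the supportsStep feature contract.
bool featuresMatch(
    const std::set<std::string> & mandatory,   // machine's mandatory features
    const std::set<std::string> & supported,   // machine's supported features
    const std::set<std::string> & required)    // step's required features
{
    for (auto & f : mandatory)
        if (!required.count(f)) return false;  // machine is reserved for steps wanting f
    for (auto & f : required)
        if (!supported.count(f)) return false; // machine cannot provide f
    return true;
}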


class HydraConfig;


class State
{
private:

    std::unique_ptr<HydraConfig> config;

    // FIXME: Make configurable.
    const unsigned int maxTries = 5;
    const unsigned int retryInterval = 60; // seconds
    const float retryBackoff = 3.0;
    const unsigned int maxParallelCopyClosure = 4;

    /* Time in seconds before unsupported build steps are aborted. */
    const unsigned int maxUnsupportedTime = 0;

    nix::Path hydraData, logDir;

    bool useSubstitutes = false;

    /* The queued builds. */
    typedef std::map<BuildID, Build::ptr> Builds;
    nix::Sync<Builds> builds;

    /* The jobsets. */
    typedef std::map<std::pair<std::string, std::string>, Jobset::ptr> Jobsets;
    nix::Sync<Jobsets> jobsets;

    /* All active or pending build steps (i.e. dependencies of the
       queued builds). Note that these are weak pointers. Steps are
       kept alive by being reachable from Builds or by being in
       progress. */
    typedef std::map<nix::StorePath, Step::wptr> Steps;
    nix::Sync<Steps> steps;

    /* Build steps that have no unbuilt dependencies. */
    typedef std::list<Step::wptr> Runnable;
    nix::Sync<Runnable> runnable;

    /* CV for waking up the dispatcher. */
    nix::Sync<bool> dispatcherWakeup;
    std::condition_variable dispatcherWakeupCV;

    /* PostgreSQL connection pool. */
    nix::Pool<Connection> dbPool;

    /* The build machines. */
    std::mutex machinesReadyLock;
    typedef std::map<nix::StoreReference::Variant, Machine::ptr> Machines;
    nix::Sync<Machines> machines; // FIXME: use atomic_shared_ptr

    /* Throttler for CPU-bound local work. */
    static constexpr unsigned int maxSupportedLocalWorkers = 1024;
    std::counting_semaphore<maxSupportedLocalWorkers> localWorkThrottler;

    /* Various stats. */
    time_t startedAt;
    counter nrBuildsRead{0};
    counter buildReadTimeMs{0};
    counter nrBuildsDone{0};
    counter nrStepsStarted{0};
    counter nrStepsDone{0};
    counter nrStepsBuilding{0};
    counter nrStepsCopyingTo{0};
    counter nrStepsWaitingForDownloadSlot{0};
    counter nrStepsCopyingFrom{0};
    counter nrStepsWaiting{0};
    counter nrUnsupportedSteps{0};
    counter nrRetries{0};
    counter maxNrRetries{0};
    counter totalStepTime{0}; // total time for steps, including closure copying
    counter totalStepBuildTime{0}; // total build time for steps
    counter nrQueueWakeups{0};
    counter nrDispatcherWakeups{0};
    counter dispatchTimeMs{0};
    counter bytesSent{0};
    counter bytesReceived{0};
    counter nrActiveDbUpdates{0};

    /* Specific build to do for --build-one (testing only). */
    BuildID buildOne;
    bool buildOneDone = false;

    /* Statistics per machine type for the Hydra auto-scaler. */
    struct MachineType
    {
        unsigned int runnable{0}, running{0};
        system_time lastActive;
        std::chrono::seconds waitTime; // time runnable steps have been waiting
    };

    nix::Sync<std::map<std::string, MachineType>> machineTypes;

    struct MachineReservation
    {
        State & state;
        Step::ptr step;
        Machine::ptr machine;
        MachineReservation(State & state, Step::ptr step, Machine::ptr machine);
        ~MachineReservation();
    };

    struct ActiveStep
    {
        Step::ptr step;

        struct State
        {
            pid_t pid = -1;
            bool cancelled = false;
        };

        nix::Sync<State> state_;
    };

    nix::Sync<std::set<std::shared_ptr<ActiveStep>>> activeSteps_;

    std::atomic<time_t> lastDispatcherCheck{0};

    std::shared_ptr<nix::Store> localStore;
    std::shared_ptr<nix::Store> _destStore;

    size_t maxOutputSize;
    size_t maxLogSize;

    /* Steps that were busy while we encountered a PostgreSQL
       error. These need to be cleared at a later time to prevent them
       from showing up as busy until the queue runner is restarted. */
    nix::Sync<std::set<std::pair<BuildID, int>>> orphanedSteps;

    /* How often the build steps of a jobset should be repeated in
       order to detect non-determinism. */
    std::map<std::pair<std::string, std::string>, size_t> jobsetRepeats;

    bool uploadLogsToBinaryCache;

    /* Where to store GC roots. Defaults to
       /nix/var/nix/gcroots/per-user/$USER/hydra-roots, overridable
       via gc_roots_dir. */
    nix::Path rootsDir;

    std::string metricsAddr;

    struct PromMetrics
    {
        std::shared_ptr<prometheus::Registry> registry;

        prometheus::Counter& queue_checks_started;
        prometheus::Counter& queue_build_loads;
        prometheus::Counter& queue_steps_created;
        prometheus::Counter& queue_checks_early_exits;
        prometheus::Counter& queue_checks_finished;

        prometheus::Counter& dispatcher_time_spent_running;
        prometheus::Counter& dispatcher_time_spent_waiting;

        prometheus::Counter& queue_monitor_time_spent_running;
        prometheus::Counter& queue_monitor_time_spent_waiting;

        PromMetrics();
    };
    PromMetrics prom;

public:
    State(std::optional<std::string> metricsAddrOpt);

private:

    nix::MaintainCount<counter> startDbUpdate();

    /* Return a store object to store build results. */
    nix::ref<nix::Store> getDestStore();

    void clearBusy(Connection & conn, time_t stopTime);

    void parseMachines(const std::string & contents);

    /* Thread to reload /etc/nix/machines periodically. */
    void monitorMachinesFile();

    unsigned int allocBuildStep(pqxx::work & txn, BuildID buildId);

    unsigned int createBuildStep(pqxx::work & txn, time_t startTime, BuildID buildId, Step::ptr step,
        const std::string & machine, BuildStatus status, const std::string & errorMsg = "",
        BuildID propagatedFrom = 0);

    void updateBuildStep(pqxx::work & txn, BuildID buildId, unsigned int stepNr, StepState stepState);

    void finishBuildStep(pqxx::work & txn, const RemoteResult & result, BuildID buildId, unsigned int stepNr,
        const std::string & machine);

    int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
        Build::ptr build, const nix::StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const nix::StorePath & storePath);

    void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);

    void queueMonitor();

    void queueMonitorLoop(Connection & conn);

    /* Check the queue for new builds. */
    bool getQueuedBuilds(Connection & conn, nix::ref<nix::Store> destStore);

    /* Handle cancellation, deletion and priority bumps. */
    void processQueueChange(Connection & conn);

    BuildOutput getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore,
        const nix::StorePath & drvPath);

    /* Returns paths missing from the remote store. Paths are processed in
     * parallel to work around the possible latency of remote stores. */
    std::map<nix::DrvOutput, std::optional<nix::StorePath>> getMissingRemotePaths(
        nix::ref<nix::Store> destStore,
        const std::map<nix::DrvOutput, std::optional<nix::StorePath>> & paths);

    Step::ptr createStep(nix::ref<nix::Store> store,
        Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
        Build::ptr referringBuild, Step::ptr referringStep, std::set<nix::StorePath> & finishedDrvs,
        std::set<Step::ptr> & newSteps, std::set<Step::ptr> & newRunnable);

    void failStep(
        Connection & conn,
        Step::ptr step,
        BuildID buildId,
        const RemoteResult & result,
        Machine::ptr machine,
        bool & stepFinished);

    Jobset::ptr createJobset(pqxx::work & txn,
        const std::string & projectName, const std::string & jobsetName, const JobsetID);

    void processJobsetSharesChange(Connection & conn);

    void makeRunnable(Step::ptr step);

    /* The thread that selects and starts runnable builds. */
    void dispatcher();

    system_time doDispatch();

    void wakeDispatcher();

    void abortUnsupported();

    void builder(std::unique_ptr<MachineReservation> reservation);

    /* Perform the given build step. Return true if the step is to be
       retried. */
    enum StepResult { sDone, sRetry, sMaybeCancelled };
    StepResult doBuildStep(nix::ref<nix::Store> destStore,
        std::unique_ptr<MachineReservation> reservation,
        std::shared_ptr<ActiveStep> activeStep);

    void buildRemote(nix::ref<nix::Store> destStore,
        std::unique_ptr<MachineReservation> reservation,
        Machine::ptr machine, Step::ptr step,
        const nix::ServeProto::BuildOptions & buildOptions,
        RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
        std::function<void(StepState)> updateStep,
        NarMemberDatas & narMembers);

    void markSucceededBuild(pqxx::work & txn, Build::ptr build,
        const BuildOutput & res, bool isCachedBuild, time_t startTime, time_t stopTime);

    bool checkCachedFailure(Step::ptr step, Connection & conn);

    void notifyBuildStarted(pqxx::work & txn, BuildID buildId);

    void notifyBuildFinished(pqxx::work & txn, BuildID buildId,
        const std::vector<BuildID> & dependentIds);

    /* Acquire the global queue runner lock, or null if somebody else
       has it. */
    std::shared_ptr<nix::PathLocks> acquireGlobalLock();

    void dumpStatus(Connection & conn);

    void addRoot(const nix::StorePath & storePath);

    void runMetricsExporter();

public:

    void showStatus();

    void unlock();

    void run(BuildID buildOne = 0);
};
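The retry knobs near the top of this class (maxTries, retryInterval, retryBackoff) suggest an exponential backoff when a step fails. A minimal sketch of how a retry deadline can be derived from them, assuming delay = retryInterval * retryBackoff^(tries - 1); the queue runner's exact jitter and clamping may differ:

#include <chrono>
#include <cmath>

// Sketch: compute when a failed step becomes retryable again.
// Illustrative only; constants mirror the class members above.
std::chrono::system_clock::time_point nextRetry(unsigned int tries)
{
    const unsigned int retryInterval = 60; // seconds
    const float retryBackoff = 3.0;
    auto delay = std::chrono::seconds(
        (long) (retryInterval * std::pow(retryBackoff, tries - 1)));
    return std::chrono::system_clock::now() + delay;
}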

@@ -2,12 +2,11 @@ package Hydra;

use strict;
use warnings;
use parent 'Catalyst';
use Moose;
use Hydra::Plugin;
use Hydra::Model::DB;
use Hydra::Config qw(getLDAPConfigAmbient);
use Hydra::Helper::Nix;

use Catalyst::Runtime '5.70';

use parent qw/Catalyst/;
use Catalyst qw/ConfigLoader
    Static::Simple
    StackTrace
@@ -16,89 +15,43 @@ use Catalyst qw/ConfigLoader
    Session
    Session::Store::FastMmap
    Session::State::Cookie
    Captcha
    PrometheusTiny/,
    '-Log=warn,fatal,error';
use CatalystX::RoleApplicator;
use Path::Class 'file';

    AccessLog
/;
our $VERSION = '0.01';

__PACKAGE__->config(
    name => 'Hydra',
    default_view => "TT",
    'Plugin::Authentication' => {
        session => {
            storage => getHydraPath . "/session_data"
        },
        authentication => {
            default_realm => "dbic",

            dbic => {
                credential => {
                    class => "Password",
                    password_field => "password",
                    password_type => "self_check",
                },
                store => {
                    class => "DBIx::Class",
                    user_class => "DB::Users",
                    role_relation => "userroles",
                    role_field => "role",
        realms => {
            dbic => {
                credential => {
                    class => "Password",
                    password_field => "password",
                    password_type => "hashed",
                    password_hash_type => "SHA-1",
                },
                store => {
                    class => "DBIx::Class",
                    user_class => "DB::Users",
                    role_relation => "userroles",
                    role_field => "role",
                },
            },
        },
        ldap => getLDAPConfigAmbient()->{'config'}
    },
    'Plugin::ConfigLoader' => {
        driver => {
            'General' => \%Hydra::Config::configGeneralOpts
        }
    },
    'Plugin::PrometheusTiny' => {
        include_action_labels => 1,
    },
    'Plugin::Static::Simple' => {
        send_etag => 1,
        expires => 3600
    },
    'View::JSON' => {
        expose_stash => 'json'
        expose_stash => qr/^json/,
    },
    'Plugin::Session' => {
        expires => 3600 * 24 * 7,
        storage => Hydra::Model::DB::getHydraPath . "/www/session_data",
        unlink_on_exit => 0
    },
    'Plugin::Captcha' => {
        session_name => 'hydra-captcha',
        new => {
            width => 270,
            height => 80,
            ptsize => 20,
            lines => 30,
            thickness => 1,
            rndmax => 5,
            scramble => 1,
            #send_ctobg => 1,
            bgcolor => '#ffffff',
            font => __PACKAGE__->path_to("ttf/StayPuft.ttf"),
        },
        create => [ qw/ttf circle/ ],
        particle => [ 3500 ],
        out => { force => 'jpeg' }
        expires => 3600 * 24 * 2,
    },
);

__PACKAGE__->apply_request_class_roles(qw/Catalyst::TraitFor::Request::ProxyBase/);

my $plugins;

has 'hydra_plugins' => (
    is => 'ro',
    default => sub { return $plugins; }
);

after setup_finalize => sub {
    my $class = shift;
    $plugins = [Hydra::Plugin->instantiate(db => $class->model('DB'), config => $class->config)];
};

__PACKAGE__->setup();

1;

@@ -7,6 +7,44 @@ use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;


sub getJobStatus {
    my ($self, $c) = @_;

    my $maintainer = $c->request->params->{"maintainer"};

    my $latest = $c->stash->{jobStatus}->search(
        defined $maintainer ? { maintainers => { like => "%$maintainer%" } } : {},
        { '+select' => ["me.statusChangeId", "me.statusChangeTime"]
        , '+as' => ["statusChangeId", "statusChangeTime"]
        , order_by => "coalesce(statusChangeTime, 0) desc"
        });

    return $latest;
}

sub jobstatus : Chained('get_builds') PathPart Args(0) {
    my ($self, $c) = @_;
    $c->stash->{template} = 'jobstatus.tt';
    $c->stash->{latestBuilds} = [getJobStatus($self, $c)->all];
}


# A convenient way to see all the errors - i.e. things demanding
# attention - at a glance.
sub errors : Chained('get_builds') PathPart Args(0) {
    my ($self, $c) = @_;
    $c->stash->{template} = 'errors.tt';
    $c->stash->{brokenJobsets} =
        [$c->stash->{allJobsets}->search({errormsg => {'!=' => ''}})]
        if defined $c->stash->{allJobsets};
    $c->stash->{brokenJobs} =
        [$c->stash->{allJobs}->search({errormsg => {'!=' => ''}})]
        if defined $c->stash->{allJobs};
    $c->stash->{brokenBuilds} =
        [getJobStatus($self, $c)->search({buildStatus => {'!=' => 0}})];
}


sub all : Chained('get_builds') PathPart {
    my ($self, $c) = @_;

@@ -16,32 +54,37 @@ sub all : Chained('get_builds') PathPart {

    my $resultsPerPage = 20;

    my $nrBuilds = $c->stash->{allBuilds}->search({finished => 1})->count;

    $c->stash->{baseUri} = $c->uri_for($self->action_for("all"), $c->req->captures);

    $c->stash->{page} = $page;
    $c->stash->{resultsPerPage} = $resultsPerPage;
    $c->stash->{total} = $c->stash->{allBuilds}->search({finished => 1})->count
        unless defined $c->stash->{total};
    $c->stash->{totalBuilds} = $nrBuilds;

    $c->stash->{builds} = [ $c->stash->{allBuilds}->search(
        { finished => 1 },
        { order_by => "stoptime DESC"
        { order_by => "timestamp DESC"
        , columns => [@buildListColumns]
        , rows => $resultsPerPage
        , page => $page }) ];
}

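The `all` action pages through finished builds with fixed `rows`/`page` options. Spelled out in C++, the window arithmetic those options imply is simply (illustrative helper, 1-based page numbers as in the controller):

#include <cstddef>

// Sketch: the offset/limit arithmetic behind rows/page pagination.
struct PageWindow { std::size_t offset, limit; };

PageWindow pageWindow(std::size_t page, std::size_t resultsPerPage)
{
    return { (page - 1) * resultsPerPage, resultsPerPage };
}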
sub nix : Chained('get_builds') PathPart('channel/latest') CaptureArgs(0) {
    my ($self, $c) = @_;

    $c->stash->{channelName} = $c->stash->{channelBaseName} . "-latest";
    $c->stash->{channelBuilds} = $c->stash->{latestSucceeded}
        ->search_literal("exists (select 1 from buildproducts where build = me.id and type = 'nix-build')")
        ->search({}, { columns => [@buildListColumns, 'drvpath', 'description', 'homepage']
        , join => ["buildoutputs"]
        , order_by => ["me.id", "buildoutputs.name"]
        , '+select' => ['buildoutputs.path', 'buildoutputs.name'], '+as' => ['outpath', 'outname'] });
sub nix : Chained('get_builds') PathPart('channel') CaptureArgs(1) {
    my ($self, $c, $channelName) = @_;
    eval {
        if ($channelName eq "latest") {
            $c->stash->{channelName} = $c->stash->{channelBaseName} . "-latest";
            $c->stash->{channelBuilds} = $c->stash->{latestSucceeded}
                ->search_literal("exists (select 1 from buildproducts where build = me.id and type = 'nix-build')")
                ->search({}, { columns => [@buildListColumns, 'drvpath', 'outpath', 'description', 'homepage'] });
        }
        else {
            notFound($c, "Unknown channel `$channelName'.");
        }
    };
    error($c, $@) if $@;
}


@@ -49,12 +92,12 @@ sub nix : Chained('get_builds') PathPart('channel/latest') CaptureArgs(0) {
sub latest : Chained('get_builds') PathPart('latest') {
    my ($self, $c, @rest) = @_;

    my $latest = $c->stash->{allBuilds}->find(
        { finished => 1, buildstatus => 0 }, { order_by => ["id DESC"], rows => 1 });
    my ($latest) = $c->stash->{allBuilds}->search(
        {finished => 1, buildstatus => 0}, {order_by => ["isCurrent DESC", "timestamp DESC"]});

    notFound($c, "There is no successful build to redirect to.") unless defined $latest;

    $c->res->redirect($c->uri_for($c->controller('Build')->action_for("build"), [$latest->id], @rest));

    $c->res->redirect($c->uri_for($c->controller('Build')->action_for("view_build"), [$latest->id], @rest));
}


@@ -63,31 +106,13 @@ sub latest_for : Chained('get_builds') PathPart('latest-for') {
    my ($self, $c, $system, @rest) = @_;

    notFound($c, "You need to specify a platform type in the URL.") unless defined $system;

    my $latest = $c->stash->{allBuilds}->find(
        { finished => 1, buildstatus => 0, system => $system }, { order_by => ["id DESC"], rows => 1 });

    my ($latest) = $c->stash->{allBuilds}->search(
        {finished => 1, buildstatus => 0, system => $system}, {order_by => ["isCurrent DESC", "timestamp DESC"]});

    notFound($c, "There is no successful build for platform `$system' to redirect to.") unless defined $latest;

    $c->res->redirect($c->uri_for($c->controller('Build')->action_for("build"), [$latest->id], @rest));
}


# Redirect to the latest successful build in a finished evaluation
# (i.e. an evaluation that has no unfinished builds).
sub latest_finished : Chained('get_builds') PathPart('latest-finished') {
    my ($self, $c, @rest) = @_;

    my $latest = $c->stash->{allBuilds}->find(
        { finished => 1, buildstatus => 0 },
        { order_by => ["id DESC"], rows => 1, join => ["jobsetevalmembers"]
        , where => \
        "not exists (select 1 from jobsetevalmembers m2 join builds b2 on jobsetevalmembers.eval = m2.eval and m2.build = b2.id and b2.finished = 0)"
        });

    notFound($c, "There is no successful build to redirect to.") unless defined $latest;

    $c->res->redirect($c->uri_for($c->controller('Build')->action_for("build"), [$latest->id], @rest));

    $c->res->redirect($c->uri_for($c->controller('Build')->action_for("view_build"), [$latest->id], @rest));
}


@@ -2,8 +2,8 @@ package Hydra::Base::Controller::NixChannel;

use strict;
use warnings;
use base 'Hydra::Base::Controller::REST';
use List::SomeUtils qw(any);
use base 'Catalyst::Controller';
use Nix::Store;
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;

@@ -11,40 +11,26 @@ use Hydra::Helper::CatalystUtils;
sub getChannelData {
    my ($c, $checkValidity) = @_;

    requireLocalStore($c);

    my @storePaths = ();
    $c->stash->{nixPkgs} = [];

    my @builds = $c->stash->{channelBuilds}->all;

    for (my $n = 0; $n < scalar @builds; ) {
        # Since channelData is a join of Builds and BuildOutputs, we
        # need to gather the rows that belong to a single build.
        my $build = $builds[$n++];
        my @outputs = ($build);
        push @outputs, $builds[$n++] while $n < scalar @builds && $builds[$n]->id == $build->id;
        @outputs = grep { $_->get_column("outpath") } @outputs;

        my $outputs = {};
        foreach my $output (@outputs) {
            my $outPath = $output->get_column("outpath");
            next if $checkValidity && !$MACHINE_LOCAL_STORE->isValidPath($outPath);
            $outputs->{$output->get_column("outname")} = $outPath;
            push @storePaths, $outPath;
            # Put the system type in the manifest (for top-level
            # paths) as a hint to the binary patch generator. (It
            # shouldn't try to generate patches between builds for
            # different systems.) It would be nice if Nix stored this
            # info for every path but it doesn't.
            $c->stash->{systemForPath}->{$outPath} = $build->system;
        }

        next if !%$outputs;

    foreach my $build ($c->stash->{channelBuilds}->all) {
        next if $checkValidity && !isValidPath($build->outpath);
        #if (isValidPath($build->drvpath)) {
        #    # Adding `drvpath' implies adding `outpath' because of the
        #    # `--include-outputs' flag passed to `nix-store'.
        #    push @storePaths, $build->drvpath;
        #} else {
        #    push @storePaths, $build->outpath;
        #}
        push @storePaths, $build->outpath;
        my $pkgName = $build->nixname . "-" . $build->system . "-" . $build->id;
        push @{$c->stash->{nixPkgs}}, { build => $build, name => $pkgName, outputs => $outputs };
    }
        $c->stash->{nixPkgs}->{"${pkgName}.nixpkg"} = {build => $build, name => $pkgName};
        # Put the system type in the manifest (for top-level paths) as
        # a hint to the binary patch generator. (It shouldn't try to
        # generate patches between builds for different systems.) It
        # would be nice if Nix stored this info for every path but it
        # doesn't.
        $c->stash->{systemForPath}->{$build->outpath} = $build->system;
    };

    $c->stash->{storePaths} = [@storePaths];
}
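getChannelData walks the flattened Builds×BuildOutputs join and gathers consecutive rows with the same build id into one record. The same single-pass grouping, sketched in C++ over a generic sorted sequence (the Row type and field names are illustrative):

#include <map>
#include <string>
#include <vector>

struct Row { int buildId; std::string outName, outPath; };

// Sketch: group consecutive rows of a join, sorted by build id, into one
// record per build — the same single pass getChannelData performs.
std::map<int, std::map<std::string, std::string>> groupByBuild(const std::vector<Row> & rows)
{
    std::map<int, std::map<std::string, std::string>> pkgs;
    for (std::size_t n = 0; n < rows.size(); ) {
        int id = rows[n].buildId;
        auto & outputs = pkgs[id];
        while (n < rows.size() && rows[n].buildId == id) {
            outputs[rows[n].outName] = rows[n].outPath;
            n++;
        }
    }
    return pkgs;
}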
@@ -52,15 +38,10 @@ sub getChannelData {

sub closure : Chained('nix') PathPart {
    my ($self, $c) = @_;

    requireLocalStore($c);

    $c->stash->{current_view} = 'NixClosure';

    getChannelData($c, 1);

    # FIXME: get the closure of the selected path only.

    # !!! quick hack; this is to make HEAD requests return the right
    # MIME type. This is set in the view as well, but the view isn't
    # called for HEAD requests. There should be a cleaner solution...
@@ -70,26 +51,38 @@ sub closure : Chained('nix') PathPart {

sub manifest : Chained('nix') PathPart("MANIFEST") Args(0) {
    my ($self, $c) = @_;
    requireLocalStore($c);
    $c->stash->{current_view} = 'NixManifest';
    $c->stash->{narBase} = $c->uri_for($c->controller('Root')->action_for("nar"));
    getChannelData($c, 1);
}


sub nixexprs : Chained('nix') PathPart('nixexprs.tar.bz2') Args(0) {
    my ($self, $c) = @_;
    requireLocalStore($c);
    $c->stash->{current_view} = 'NixExprs';
    getChannelData($c, 1);
sub pkg : Chained('nix') PathPart Args(1) {
    my ($self, $c, $pkgName) = @_;

    if (!$c->stash->{build}) {
        $pkgName =~ /-(\d+)\.nixpkg$/ or notFound($c, "Bad package name.");
        $c->stash->{build} = $c->stash->{channelBuilds}->find({ id => $1 })
            || notFound($c, "No such package in this channel.");
    }

    if (!isValidPath($c->stash->{build}->outpath)) {
        $c->response->status(410); # "Gone"
        error($c, "Build " . $c->stash->{build}->id . " is no longer available.");
    }

    $c->stash->{manifestUri} = $c->uri_for($self->action_for("manifest"), $c->req->captures);

    $c->stash->{current_view} = 'NixPkg';

    $c->response->content_type('application/nix-package');
}


sub binary_cache_url : Chained('nix') PathPart('binary-cache-url') Args(0) {
sub nixexprs : Chained('nix') PathPart('nixexprs.tar.bz2') Args(0) {
    my ($self, $c) = @_;
    $c->stash->{'plain'} = { data => $c->uri_for('/') };
    $c->response->content_type('text/plain');
    $c->forward('Hydra::View::Plain');
    $c->stash->{current_view} = 'NixExprs';
    getChannelData($c, 1);
}


@@ -109,14 +102,12 @@ sub sortPkgs {

sub channel_contents : Chained('nix') PathPart('') Args(0) {
    my ($self, $c) = @_;
    requireLocalStore($c);
    # Optimistically assume that none of the packages have been
    # garbage-collected. That should be true for the "latest"
    # channel.
    getChannelData($c, 0);
    $c->stash->{genericChannel} = 1;
    $c->stash->{template} = 'channel-contents.tt';
    $c->stash->{nixPkgs} = [sortPkgs @{$c->stash->{nixPkgs}}];
    $c->stash->{nixPkgs} = [sortPkgs (values %{$c->stash->{nixPkgs}})];
}


@@ -1,22 +0,0 @@
package Hydra::Base::Controller::REST;

use strict;
use warnings;
use base 'Catalyst::Controller::REST';

# Hack: Erase the map set by C::C::REST
__PACKAGE__->config( map => undef );
__PACKAGE__->config(
    map => {
        'application/json' => 'JSON',
        'text/x-json' => 'JSON',
        'text/html' => [ 'View', 'TT' ]
    },
    default => 'text/html',
    'stash_key' => 'resource',
);

sub begin { my ( $self, $c ) = @_; $c->forward('Hydra::Controller::Root::begin'); }
sub end { my ( $self, $c ) = @_; $c->forward('Hydra::Controller::Root::end'); }

1;
@@ -1,46 +0,0 @@
use utf8;
package Hydra::Component::ToJSON;

use strict;
use warnings;

use base 'DBIx::Class';
use JSON::MaybeXS;

sub TO_JSON {
    my $self = shift;

    if ($self->can("as_json")) {
        return $self->as_json();
    }

    my $hint = $self->json_hint;

    my %json = ();

    foreach my $column (@{$hint->{columns}}) {
        $json{$column} = $self->get_column($column);
    }

    foreach my $column (@{$hint->{string_columns}}) {
        $json{$column} = $self->get_column($column) // "";
    }

    foreach my $column (@{$hint->{boolean_columns}}) {
        $json{$column} = $self->get_column($column) ? JSON::MaybeXS::true : JSON::MaybeXS::false;
    }

    foreach my $relname (keys %{$hint->{relations}}) {
        my $key = $hint->{relations}->{$relname};
        $json{$relname} = [ map { $_->$key } $self->$relname ];
    }

    foreach my $relname (keys %{$hint->{eager_relations}}) {
        my $key = $hint->{eager_relations}->{$relname};
        $json{$relname} = { map { $_->$key => $_ } $self->$relname };
    }

    return \%json;
}

1;
@@ -1,168 +0,0 @@
package Hydra::Config;

use strict;
use warnings;
use Config::General;
use List::SomeUtils qw(none);
use YAML qw(LoadFile);

our @ISA = qw(Exporter);
our @EXPORT = qw(
    getHydraConfig
    getLDAPConfig
    getLDAPConfigAmbient
);

our %configGeneralOpts = (-UseApacheInclude => 1, -IncludeAgain => 1, -IncludeRelative => 1);

my $hydraConfigCache;

sub getHydraConfig {
    return $hydraConfigCache if defined $hydraConfigCache;

    my $conf;

    if ($ENV{"HYDRA_CONFIG"}) {
        $conf = $ENV{"HYDRA_CONFIG"};
    } else {
        require Hydra::Model::DB;
        $conf = Hydra::Model::DB::getHydraPath() . "/hydra.conf"
    };

    if (-f $conf) {
        $hydraConfigCache = loadConfig($conf);
    } else {
        $hydraConfigCache = {};
    }

    return $hydraConfigCache;
}

sub loadConfig {
    my ($sourceFile) = @_;

    my %opts = (%configGeneralOpts, -ConfigFile => $sourceFile);

    return { Config::General->new(%opts)->getall };
}

sub is_ldap_in_legacy_mode {
    my ($config, %env) = @_;

    my $legacy_defined = defined $env{"HYDRA_LDAP_CONFIG"};

    if (defined $config->{"ldap"}) {
        if ($legacy_defined) {
            die "The legacy environment variable HYDRA_LDAP_CONFIG is set, but config is also specified in hydra.conf. Please unset the environment variable.";
        }

        return 0;
    } elsif ($legacy_defined) {
        warn "Hydra is configured to use LDAP via the HYDRA_LDAP_CONFIG, a deprecated method. Please see the docs about configuring LDAP in the hydra.conf.";
        return 1;
    } else {
        return 0;
    }
}

sub getLDAPConfigAmbient {
    return getLDAPConfig(getHydraConfig(), %ENV);
}

sub getLDAPConfig {
    my ($config, %env) = @_;

    my $ldap_config;

    if (is_ldap_in_legacy_mode($config, %env)) {
        $ldap_config = get_legacy_ldap_config($env{"HYDRA_LDAP_CONFIG"});
    } else {
        $ldap_config = $config->{"ldap"};
    }

    $ldap_config->{"role_mapping"} = normalize_ldap_role_mappings($ldap_config->{"role_mapping"});

    return $ldap_config;
}

sub get_legacy_ldap_config {
    my ($ldap_yaml_file) = @_;

    return {
        config => LoadFile($ldap_yaml_file),
        role_mapping => {
            "hydra_admin" => [ "admin" ],
            "hydra_bump-to-front" => [ "bump-to-front" ],
            "hydra_cancel-build" => [ "cancel-build" ],
            "hydra_create-projects" => [ "create-projects" ],
            "hydra_eval-jobset" => [ "eval-jobset" ],
            "hydra_restart-jobs" => [ "restart-jobs" ],
        },
    };
}

sub normalize_ldap_role_mappings {
    my ($input_map) = @_;

    my $mapping = {};

    my @errors;

    for my $group (keys %{$input_map}) {
        my $input = $input_map->{$group};

        if (ref $input eq "ARRAY") {
            $mapping->{$group} = $input;
        } elsif (ref $input eq "") {
            $mapping->{$group} = [ $input ];
        } else {
            push @errors, "On group '$group': the value is of type ${\ref $input}. Only strings and lists are acceptable.";
            $mapping->{$group} = [ ];
        }

        eval {
            validate_roles($mapping->{$group});
        };
        if ($@) {
            push @errors, "On group '$group': $@";
        }
    }

    if (@errors) {
        die "Failed to normalize LDAP role mappings:\n" . (join "\n", @errors);
    }

    return $mapping;
}

sub validate_roles {
    my ($roles) = @_;

    my @invalid;
    my $valid = valid_roles();

    for my $role (@$roles) {
        if (none { $_ eq $role } @$valid) {
            push @invalid, "'$role'";
        }
    }

    if (@invalid) {
        die "Invalid roles: ${\join ', ', @invalid}. Valid roles are: ${\join ', ', @$valid}.";
    }

    return 1;
}

sub valid_roles {
    return [
        "admin",
        "bump-to-front",
        "cancel-build",
        "create-projects",
        "eval-jobset",
        "restart-jobs",
    ];
}

1;
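normalize_ldap_role_mappings accepts either a single role string or a list per LDAP group, always returns lists, and collects every problem before failing. The same normalize-then-validate shape sketched in C++ (std::variant stands in for Perl's ref check; names are illustrative):

#include <map>
#include <set>
#include <stdexcept>
#include <string>
#include <variant>
#include <vector>

using Roles = std::vector<std::string>;

// Sketch: accept a string or a list per group, emit lists only, and
// report all problems at once rather than stopping at the first.
std::map<std::string, Roles> normalizeRoleMappings(
    const std::map<std::string, std::variant<std::string, Roles>> & input)
{
    static const std::set<std::string> validRoles{
        "admin", "bump-to-front", "cancel-build",
        "create-projects", "eval-jobset", "restart-jobs"};
    std::map<std::string, Roles> mapping;
    std::vector<std::string> errors;
    for (auto & [group, value] : input) {
        Roles roles = std::holds_alternative<std::string>(value)
            ? Roles{std::get<std::string>(value)}
            : std::get<Roles>(value);
        for (auto & role : roles)
            if (!validRoles.count(role))
                errors.push_back("on group '" + group + "': invalid role '" + role + "'");
        mapping[group] = std::move(roles);
    }
    if (!errors.empty()) {
        std::string msg = "failed to normalize LDAP role mappings:";
        for (auto & e : errors) msg += "\n" + e;
        throw std::runtime_error(msg);
    }
    return mapping;
}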
@@ -1,20 +1,19 @@
package Hydra::Controller::API;

use utf8;
use strict;
use warnings;
use base 'Hydra::Base::Controller::REST';
use base 'Catalyst::Controller';
use Hydra::Helper::Nix;
use Hydra::Helper::AddBuilds;
use Hydra::Helper::CatalystUtils;
use Hydra::Controller::Project;
use JSON::MaybeXS;
use JSON::Any;
use DateTime;
use Digest::SHA qw(sha256_hex);
use Text::Diff;
use IPC::Run qw(run);
use Digest::SHA qw(hmac_sha256_hex);
use String::Compare::ConstantTime qw(equals);
use IPC::Run3;
use File::Slurp;

# !!! Rewrite this to use View::JSON.


sub api : Chained('/') PathPart('api') CaptureArgs(0) {
@@ -23,12 +22,38 @@ sub api : Chained('/') PathPart('api') CaptureArgs(0) {
}


sub projectToHash {
    my ($project) = @_;
    return {
        name => $project->name,
        description => $project->description
    };
}


sub projects : Chained('api') PathPart('projects') Args(0) {
    my ($self, $c) = @_;

    my @projects = $c->model('DB::Projects')->search({hidden => 0}, {order_by => 'name'});

    my @list;
    foreach my $p (@projects) {
        push @list, projectToHash($p);
    }

    $c->stash->{'plain'} = {
        data => scalar (JSON::Any->objToJson(\@list))
    };
    $c->forward('Hydra::View::Plain');
}


sub buildToHash {
    my ($build) = @_;
    my $result = {
        id => $build->id,
        project => $build->jobset->get_column("project"),
        jobset => $build->jobset->get_column("name"),
        project => $build->get_column("project"),
        jobset => $build->get_column("jobset"),
        job => $build->get_column("job"),
        system => $build->system,
        nixname => $build->nixname,
@@ -36,12 +61,13 @@ sub buildToHash {
        timestamp => $build->timestamp
    };

    if($build->finished) {
    if($build->finished) {
        $result->{'buildstatus'} = $build->get_column("buildstatus");
    } else {
        $result->{'busy'} = $build->get_column("busy");
        $result->{'priority'} = $build->get_column("priority");
    }


    return $result;
};

@@ -55,26 +81,20 @@ sub latestbuilds : Chained('api') PathPart('latestbuilds') Args(0) {
    my $jobset = $c->request->params->{jobset};
    my $job = $c->request->params->{job};
    my $system = $c->request->params->{system};


    my $filter = {finished => 1};
    $filter->{"jobset.project"} = $project if ! $project eq "";
    $filter->{"jobset.name"} = $jobset if ! $jobset eq "";
    $filter->{job} = $job if !$job eq "";
    $filter->{system} = $system if !$system eq "";

    my @latest = $c->model('DB::Builds')->search(
        $filter,
        {
            rows => $nr,
            order_by => ["id DESC"],
            join => [ "jobset" ]
        });

    $filter->{project} = $project if ! $project eq "";
    $filter->{jobset} = $jobset if ! $jobset eq "";
    $filter->{job} = $job if !$job eq "";
    $filter->{system} = $system if !$system eq "";

    my @latest = $c->model('DB::Builds')->search($filter, {rows => $nr, order_by => ["timestamp DESC"] });

    my @list;
    push @list, buildToHash($_) foreach @latest;

    $c->stash->{'plain'} = {
        data => scalar (encode_json(\@list))

    $c->stash->{'plain'} = {
        data => scalar (JSON::Any->objToJson(\@list))
    };
    $c->forward('Hydra::View::Plain');
}
@@ -83,21 +103,14 @@ sub latestbuilds : Chained('api') PathPart('latestbuilds') Args(0) {
sub jobsetToHash {
    my ($jobset) = @_;
    return {
        project => $jobset->get_column('project'),
        name => $jobset->name,
        project => $jobset->project->name,
        name => $jobset->name,
        nrscheduled => $jobset->get_column("nrscheduled"),
        nrsucceeded => $jobset->get_column("nrsucceeded"),
        nrfailed => $jobset->get_column("nrfailed"),
        nrtotal => $jobset->get_column("nrtotal"),
        lastcheckedtime => $jobset->lastcheckedtime,
        starttime => $jobset->starttime,
        checkinterval => $jobset->checkinterval,
        triggertime => $jobset->triggertime,
        fetcherrormsg => $jobset->fetcherrormsg,
        errortime => $jobset->errortime,
        haserrormsg => defined($jobset->errormsg) && $jobset->errormsg ne "" ? JSON::MaybeXS::true : JSON::MaybeXS::false
        nrtotal => $jobset->get_column("nrtotal")
    };
}
}


sub jobsets : Chained('api') PathPart('jobsets') Args(0) {
@@ -110,12 +123,12 @@ sub jobsets : Chained('api') PathPart('jobsets') Args(0) {
        or notFound($c, "Project $projectName doesn't exist.");

    my @jobsets = jobsetOverview($c, $project);


    my @list;
    push @list, jobsetToHash($_) foreach @jobsets;

    $c->stash->{'plain'} = {
        data => scalar (encode_json(\@list))

    $c->stash->{'plain'} = {
        data => scalar (JSON::Any->objToJson(\@list))
    };
    $c->forward('Hydra::View::Plain');
}
@@ -127,13 +140,13 @@ sub queue : Chained('api') PathPart('queue') Args(0) {
    my $nr = $c->request->params->{nr};
    error($c, "Parameter not defined!") if !defined $nr;

    my @builds = $c->model('DB::Builds')->search({finished => 0}, {rows => $nr, order_by => ["priority DESC", "id"]});

    my @builds = $c->model('DB::Builds')->search({finished => 0}, {rows => $nr, order_by => ["busy DESC", "priority DESC", "timestamp"]});

    my @list;
    push @list, buildToHash($_) foreach @builds;

    $c->stash->{'plain'} = {
        data => scalar (encode_json(\@list))
    $c->stash->{'plain'} = {
        data => scalar (JSON::Any->objToJson(\@list))
    };
    $c->forward('Hydra::View::Plain');
}
@@ -142,18 +155,28 @@ sub queue : Chained('api') PathPart('queue') Args(0) {
sub nrqueue : Chained('api') PathPart('nrqueue') Args(0) {
    my ($self, $c) = @_;
    my $nrQueuedBuilds = $c->model('DB::Builds')->search({finished => 0})->count();
    $c->stash->{'plain'} = {
    $c->stash->{'plain'} = {
        data => "$nrQueuedBuilds"
    };
    $c->forward('Hydra::View::Plain');
}


sub nrrunning : Chained('api') PathPart('nrrunning') Args(0) {
    my ($self, $c) = @_;
    my $nrRunningBuilds = $c->model('DB::Builds')->search({finished => 0, busy => 1 })->count();
    $c->stash->{'plain'} = {
        data => "$nrRunningBuilds"
    };
    $c->forward('Hydra::View::Plain');
}


sub nrbuilds : Chained('api') PathPart('nrbuilds') Args(0) {
    my ($self, $c) = @_;
    my $nr = $c->request->params->{nr};
    my $period = $c->request->params->{period};


    error($c, "Parameter not defined!") if !defined $nr || !defined $period;
    my $base;

@@ -163,69 +186,49 @@ sub nrbuilds : Chained('api') PathPart('nrbuilds') Args(0) {
    my $system = $c->request->params->{system};

    my $filter = {finished => 1};
    $filter->{"jobset.project"} = $project if ! $project eq "";
    $filter->{"jobset.name"} = $jobset if ! $jobset eq "";
    $filter->{job} = $job if !$job eq "";
    $filter->{system} = $system if !$system eq "";
    $filter->{project} = $project if ! $project eq "";
    $filter->{jobset} = $jobset if ! $jobset eq "";
    $filter->{job} = $job if !$job eq "";
    $filter->{system} = $system if !$system eq "";

    $base = 60*60 if($period eq "hour");
    $base = 24*60*60 if($period eq "day");

    my @stats = $c->model('DB::Builds')->search(
        $filter,
        {
            select => [{ count => "*" }],
            as => ["nr"],
            group_by => ["timestamp - timestamp % $base"],
            order_by => "timestamp - timestamp % $base DESC",
            rows => $nr,
            join => [ "jobset" ]
        }
    );

    my @stats = $c->model('DB::Builds')->search($filter, {select => [{ count => "*" }], as => ["nr"], group_by => ["timestamp - timestamp % $base"], order_by => "timestamp - timestamp % $base DESC", rows => $nr});
    my @arr;
    push @arr, int($_->get_column("nr")) foreach @stats;
    @arr = reverse(@arr);

    $c->stash->{'plain'} = {
        data => scalar (encode_json(\@arr))

    $c->stash->{'plain'} = {
        data => scalar (JSON::Any->objToJson(\@arr))
    };
    $c->forward('Hydra::View::Plain');
}
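The `timestamp - timestamp % $base` expression rounds each build's timestamp down to the start of its hour or day, so grouping by it counts builds per period. The same bucketing in C++ (a stand-alone illustrative helper):

#include <ctime>
#include <map>
#include <vector>

// Sketch: count builds per period by flooring each timestamp to its bucket,
// i.e. t - t % base — the same arithmetic as the SQL group_by above.
std::map<time_t, unsigned int> countPerPeriod(const std::vector<time_t> & timestamps, time_t base)
{
    std::map<time_t, unsigned int> counts;
    for (time_t t : timestamps)
        counts[t - t % base]++;  // bucket start, e.g. base = 3600 for hourly
    return counts;
}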

sub scmdiff : Path('/api/scmdiff') Args(0) {
sub scmdiff : Chained('api') PathPart('scmdiff') Args(0) {
    my ($self, $c) = @_;

    my $uri = $c->request->params->{uri};
    my $type = $c->request->params->{type};
    my $rev1 = $c->request->params->{rev1};
    my $rev2 = $c->request->params->{rev2};
    my $branch;

    die("invalid revisions: [$rev1] [$rev2]") if $rev1 !~ m/^[a-zA-Z0-9_.]+$/ || $rev2 !~ m/^[a-zA-Z0-9_.]+$/;

    # FIXME: injection danger.

    my $diff = "";
    if ($type eq "hg") {
        my $clonePath = getSCMCacheDir . "/hg/" . sha256_hex($uri);
        die "repository '$uri' is not in the SCM cache\n" if ! -d $clonePath;
        my $out;
        run(["hg", "log", "-R", $clonePath, "-r", "reverse($rev1::$rev2) and not($rev1)"], \undef, \$out)
            or die "hg log failed";
        $diff .= $out;
        run(["hg", "diff", "-R", $clonePath, "-r", "$rev1::$rev2"], \undef, \$out)
            or die "hg diff failed";
        $diff .= $out;
    } elsif ($type eq "git") {
        my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);
        my $clonePath = scmPath . "/" . sha256_hex($uri);
        die if ! -d $clonePath;
        my ($stdout1, $stderr1);
        run3(['git', '-C', $clonePath, 'log', "$rev1..$rev2"], \undef, \$stdout1, \$stderr1);
        $diff .= $stdout1 if $? == 0;

        my ($stdout2, $stderr2);
        run3(['git', '-C', $clonePath, 'diff', "$rev1..$rev2"], \undef, \$stdout2, \$stderr2);
        $diff .= $stdout2 if $? == 0;
        $branch = `(cd $clonePath; hg log --template '{branch}' -r $rev2)`;
        $diff .= `(cd $clonePath; hg log -r $rev1 -r $rev2 -b $branch)`;
        $diff .= `(cd $clonePath; hg diff -r $rev1:$rev2)`;
    } elsif ($type eq "git") {
        my $clonePath = scmPath . "/" . sha256_hex($uri);
        die if ! -d $clonePath;
        $diff .= `(cd $clonePath; git log $rev1..$rev2)`;
        $diff .= `(cd $clonePath; git diff $rev1..$rev2)`;
    }

    $c->stash->{'plain'} = { data => (scalar $diff) || " " };
@@ -233,163 +236,41 @@ sub scmdiff : Path('/api/scmdiff') Args(0) {
}


sub triggerJobset {
    my ($self, $c, $jobset, $force) = @_;
    print STDERR "triggering jobset ", $jobset->get_column('project') . ":" . $jobset->name, "\n";
    $c->model('DB')->schema->txn_do(sub {
        $jobset->update({ triggertime => time });
        $jobset->update({ forceeval => 1 }) if $force;
    });
    push @{$c->{stash}->{json}->{jobsetsTriggered}}, $jobset->get_column('project') . ":" . $jobset->name;
sub readNormalizedLog {
    my ($file) = @_;
    my $pipe = (-f "$file.bz2" ? "cat $file.bz2 | bzip2 -d" : "cat $file");
    my $res = `$pipe`;

    $res =~ s/\/nix\/store\/[a-z0-9]*-/\/nix\/store\/...-/g;
    $res =~ s/nix-build-[a-z0-9]*-/nix-build-...-/g;
    $res =~ s/[0-9]{2}:[0-9]{2}:[0-9]{2}/00:00:00/g;
    return $res;
}

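readNormalizedLog makes two build logs comparable by masking what legitimately differs between runs: store hashes, temporary build directories, and timestamps. The same masking in C++ (std::regex; the three patterns are copied from the Perl above):

#include <regex>
#include <string>

// Sketch: mask run-specific details so two build logs diff cleanly —
// the same three substitutions readNormalizedLog applies.
std::string normalizeLog(std::string log)
{
    log = std::regex_replace(log, std::regex("/nix/store/[a-z0-9]*-"), "/nix/store/...-");
    log = std::regex_replace(log, std::regex("nix-build-[a-z0-9]*-"), "nix-build-...-");
    log = std::regex_replace(log, std::regex("[0-9]{2}:[0-9]{2}:[0-9]{2}"), "00:00:00");
    return log;
}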
sub push : Chained('api') PathPart('push') Args(0) {
    my ($self, $c) = @_;
sub logdiff : Chained('api') PathPart('logdiff') Args(2) {
    my ($self, $c, $buildid1, $buildid2) = @_;

    requirePost($c);
    my $diff = "";

    $c->{stash}->{json}->{jobsetsTriggered} = [];
    my $build1 = getBuild($c, $buildid1);
    notFound($c, "Build with ID $buildid1 doesn't exist.")
        if !defined $build1;
    my $build2 = getBuild($c, $buildid2);
    notFound($c, "Build with ID $buildid2 doesn't exist.")
        if !defined $build2;

    my $force = exists $c->request->query_params->{force};
    my @jobsets = split /,/, ($c->request->query_params->{jobsets} // "");
    foreach my $s (@jobsets) {
        my ($p, $j) = parseJobsetName($s);
        my $jobset = $c->model('DB::Jobsets')->find($p, $j);
        requireEvalJobsetPrivileges($c, $jobset->project);
        next unless defined $jobset && ($force || ($jobset->project->enabled && $jobset->enabled));
        triggerJobset($self, $c, $jobset, $force);
    }

    my @repos = split /,/, ($c->request->query_params->{repos} // "");
    foreach my $r (@repos) {
        my @jobsets = $c->model('DB::Jobsets')->search(
            { 'project.enabled' => 1, 'me.enabled' => 1 },
            {
                join => 'project',
                where => \ [ 'exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value = ?)', [ 'value', $r ] ],
                order_by => 'me.id DESC'
            });
        foreach my $jobset (@jobsets) {
            requireEvalJobsetPrivileges($c, $jobset->project);
            triggerJobset($self, $c, $jobset, $force)
        }
    }

    $self->status_ok(
        $c,
        entity => { jobsetsTriggered => $c->stash->{json}->{jobsetsTriggered} }
    );
}

sub verifyWebhookSignature {
    my ($c, $platform, $header_name, $signature_prefix) = @_;

    # Get secrets from config
    my $webhook_config = $c->config->{webhooks} // {};
    my $platform_config = $webhook_config->{$platform} // {};
    my $secrets = $platform_config->{secret};

    # Normalize to array
    $secrets = [] unless defined $secrets;
    $secrets = [$secrets] unless ref($secrets) eq 'ARRAY';

    # Trim whitespace from secrets
    my @secrets = grep { defined && length } map { s/^\s+|\s+$//gr } @$secrets;

    if (@secrets) {
        my $signature = $c->request->header($header_name);

        if (!$signature) {
            $c->log->warn("Webhook authentication failed for $platform: Missing signature from IP " . $c->request->address);
            $c->response->status(401);
            $c->stash->{json} = { error => "Missing webhook signature" };
            $c->forward('View::JSON');
            return 0;
        }

        # Get the raw body content from the buffered PSGI input
        # For JSON requests, Catalyst will have already read and buffered the body
        my $input = $c->request->env->{'psgi.input'};
        $input->seek(0, 0);
        local $/;
        my $payload = <$input>;
        $input->seek(0, 0); # Reset for any other consumers

        unless (defined $payload && length $payload) {
            $c->log->warn("Webhook authentication failed for $platform: Empty request body from IP " . $c->request->address);
            $c->response->status(400);
            $c->stash->{json} = { error => "Empty request body" };
            $c->forward('View::JSON');
            return 0;
        }

        my $valid = 0;
        for my $secret (@secrets) {
            my $expected = $signature_prefix . hmac_sha256_hex($payload, $secret);
            if (equals($signature, $expected)) {
                $valid = 1;
                last;
            }
        }

        if (!$valid) {
            $c->log->warn("Webhook authentication failed for $platform: Invalid signature from IP " . $c->request->address);
            $c->response->status(401);
            $c->stash->{json} = { error => "Invalid webhook signature" };
            $c->forward('View::JSON');
            return 0;
        }

        return 1;
    if (-f $build1->logfile && -f $build2->logfile) {
        my $logtext1 = readNormalizedLog($build1->logfile);
        my $logtext2 = readNormalizedLog($build2->logfile);
        $diff = diff \$logtext1, \$logtext2;
    } else {
        $c->log->warn("Webhook authentication failed for $platform: Unable to validate signature from IP " . $c->request->address . " because no secrets are configured");
        $c->response->status(401);
        $c->stash->{json} = { error => "Invalid webhook signature" };
        $c->forward('View::JSON');
        return 0;
        $c->response->status(404);
    }
}
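verifyWebhookSignature recomputes the HMAC-SHA256 of the raw request body for each configured secret and compares it to the signature header in constant time. A C++ sketch of the same check, assuming OpenSSL's HMAC and CRYPTO_memcmp (the hexEncode helper is ours, added for illustration):

#include <openssl/crypto.h>   // CRYPTO_memcmp
#include <openssl/evp.h>      // EVP_sha256
#include <openssl/hmac.h>     // HMAC
#include <string>
#include <vector>

// Small helper: bytes to lowercase hex, matching hmac_sha256_hex's output.
static std::string hexEncode(const unsigned char * data, size_t len)
{
    static const char digits[] = "0123456789abcdef";
    std::string s;
    for (size_t i = 0; i < len; ++i) {
        s += digits[data[i] >> 4];
        s += digits[data[i] & 0x0f];
    }
    return s;
}

// Sketch: accept if any configured secret yields the presented signature;
// comparison is constant-time to avoid leaking a prefix match.
bool verifySignature(
    const std::string & payload,
    const std::string & signature,            // e.g. "sha256=<hex>"
    const std::vector<std::string> & secrets,
    const std::string & prefix)               // "sha256=" for GitHub, "" for Gitea
{
    for (auto & secret : secrets) {
        unsigned char mac[32]; unsigned int macLen = 0;
        HMAC(EVP_sha256(),
             secret.data(), secret.size(),
             (const unsigned char *) payload.data(), payload.size(),
             mac, &macLen);
        std::string expected = prefix + hexEncode(mac, macLen);
        if (expected.size() == signature.size()
            && CRYPTO_memcmp(expected.data(), signature.data(), expected.size()) == 0)
            return true;
    }
    return false;
}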

sub push_github : Chained('api') PathPart('push-github') Args(0) {
    my ($self, $c) = @_;

    $c->{stash}->{json}->{jobsetsTriggered} = [];

    return unless verifyWebhookSignature($c, 'github', 'X-Hub-Signature-256', 'sha256=');

    my $in = $c->request->{data};
    my $owner = ($in->{repository}->{owner}->{name} // $in->{repository}->{owner}->{login}) or die;
    my $repo = $in->{repository}->{name} or die;
    print STDERR "got push from GitHub repository $owner/$repo\n";

    triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search(
        { 'project.enabled' => 1, 'me.enabled' => 1 },
        { join => 'project'
        , where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%github%$owner/$repo%"], [ 'value', "%github.com%$owner/$repo%" ] ]
        });
    $c->response->body("");
}

sub push_gitea : Chained('api') PathPart('push-gitea') Args(0) {
    my ($self, $c) = @_;

    $c->{stash}->{json}->{jobsetsTriggered} = [];

    # Note: Gitea doesn't use sha256= prefix
    return unless verifyWebhookSignature($c, 'gitea', 'X-Gitea-Signature', '');

    my $in = $c->request->{data};
    my $url = $in->{repository}->{clone_url} or die;
    $url =~ s/.git$//;
    print STDERR "got push from Gitea repository $url\n";

    triggerJobset($self, $c, $_, 0) foreach $c->model('DB::Jobsets')->search(
        { 'project.enabled' => 1, 'me.enabled' => 1 },
        { join => 'project'
        , where => \ [ 'me.flake like ? or exists (select 1 from JobsetInputAlts where project = me.project and jobset = me.name and value like ?)', [ 'flake', "%$url%"], [ 'value', "%$url%" ] ]
        });
    $c->response->body("");
    $c->response->content_type('text/x-diff');
    $c->stash->{'plain'} = { data => (scalar $diff) || " " };
    $c->forward('Hydra::View::Plain');
}


@@ -5,9 +5,42 @@ use warnings;
use base 'Catalyst::Controller';
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
use Hydra::Helper::AddBuilds;
use Data::Dump qw(dump);
use Digest::SHA1 qw(sha1_hex);
use Crypt::RandPasswd;
use Sys::Hostname::Long;
use Email::Simple;
use Email::Sender::Simple qw(sendmail);
use Email::Sender::Transport::SMTP;
use Config::General;

sub nixMachines {
    my ($c) = @_;
    my $result = "# GENERATED BY HYDRA\n";

    foreach my $machine ($c->model("DB::BuildMachines")->all) {
        if($machine->enabled) {
            $result = $result . $machine->username . '@'. $machine->hostname . ' ';
            foreach my $system ($machine->buildmachinesystemtypes) {
                $result = $result . $system->system .',';
            }
            chop $result;
            $result = $result . ' '. $machine->ssh_key . ' ' . $machine->maxconcurrent . ' '. $machine->speedfactor . ' ' . $machine->options . "\n";
        }
    }
    return $result;
}

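nixMachines emits one line per enabled machine in the classic Nix machines-file layout: user@host, a comma-separated system list, SSH key, max concurrent jobs, speed factor, options. A compact C++ rendering of the same line format (all field values here are illustrative):

#include <sstream>
#include <string>
#include <vector>

// Sketch: render one /etc/nix.machines line, e.g.
// "root@builder1 x86_64-linux,i686-linux /etc/keys/b1 4 1 kvm"
std::string machineLine(
    const std::string & user, const std::string & host,
    const std::vector<std::string> & systems,
    const std::string & sshKey, unsigned maxJobs, unsigned speedFactor,
    const std::string & options)
{
    std::ostringstream line;
    line << user << '@' << host << ' ';
    for (std::size_t i = 0; i < systems.size(); ++i)
        line << (i ? "," : "") << systems[i];
    line << ' ' << sshKey << ' ' << maxJobs << ' ' << speedFactor << ' ' << options;
    return line.str();
}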
sub saveNixMachines {
|
||||
my ($c) = @_;
|
||||
|
||||
die("File not writable: /etc/nix.machines") if ! -w "/etc/nix.machines" ;
|
||||
|
||||
open (NIXMACHINES, '>/etc/nix.machines') or die("Could not write to /etc/nix.machines");
|
||||
print NIXMACHINES nixMachines($c);
|
||||
close (NIXMACHINES);
|
||||
}
|
||||
|
||||
sub admin : Chained('/') PathPart('admin') CaptureArgs(0) {
|
||||
my ($self, $c) = @_;
|
||||
@@ -15,51 +48,293 @@ sub admin : Chained('/') PathPart('admin') CaptureArgs(0) {
|
||||
$c->stash->{admin} = 1;
|
||||
}
|
||||
|
||||
sub index : Chained('admin') PathPart('') Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
$c->stash->{machines} = [$c->model('DB::BuildMachines')->search(
|
||||
{},
|
||||
{ order_by => ["enabled DESC", "hostname"]
|
||||
, '+select' => ["(select bs.stoptime from buildsteps as bs where bs.machine = (me.username || '\@' || me.hostname) and not bs.stoptime is null order by bs.stoptime desc limit 1)"]
|
||||
, '+as' => ['idle']
|
||||
})];
|
||||
$c->stash->{steps} = [ $c->model('DB::BuildSteps')->search(
|
||||
{ finished => 0, 'me.busy' => 1, 'build.busy' => 1, },
|
||||
{ join => [ 'build' ]
|
||||
, order_by => [ 'machine', 'stepnr' ]
|
||||
} ) ];
|
||||
$c->stash->{template} = 'admin.tt';
|
||||
}
|
||||
|
||||
sub updateUser {
|
||||
my ($c, $user) = @_;
|
||||
|
||||
my $username = trim $c->request->params->{"username"};
|
||||
my $fullname = trim $c->request->params->{"fullname"};
|
||||
my $emailaddress = trim $c->request->params->{"emailaddress"};
|
||||
my $emailonerror = trim $c->request->params->{"emailonerror"};
|
||||
my $roles = $c->request->params->{"roles"} ;
|
||||
|
||||
$user->update(
|
||||
{ fullname => $fullname
|
||||
, emailaddress => $emailaddress
|
||||
, emailonerror => $emailonerror
|
||||
});
|
||||
$user->userroles->delete_all;
|
||||
if(ref($roles) eq 'ARRAY') {
|
||||
for my $s (@$roles) {
|
||||
$user->userroles->create({ role => $s}) ;
|
||||
}
|
||||
} else {
|
||||
$user->userroles->create({ role => $roles}) if defined $roles ;
|
||||
}
|
||||
}
|
||||
|
||||
sub user : Chained('admin') PathPart('user') CaptureArgs(1) {
|
||||
my ($self, $c, $username) = @_;
|
||||
|
||||
requireAdmin($c);
|
||||
|
||||
my $user = $c->model('DB::Users')->find($username)
|
||||
or notFound($c, "User $username doesn't exist.");
|
||||
|
||||
$c->stash->{user} = $user;
|
||||
}
|
||||
|
||||
sub users : Chained('admin') PathPart('users') Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
$c->stash->{users} = [$c->model('DB::Users')->search({}, {order_by => "username"})];
|
||||
|
||||
$c->stash->{template} = 'users.tt';
|
||||
}
|
||||
|
||||
sub user_edit : Chained('user') PathPart('edit') Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
|
||||
$c->stash->{template} = 'user.tt';
|
||||
$c->stash->{edit} = 1;
|
||||
}
|
||||
|
||||
sub user_edit_submit : Chained('user') PathPart('submit') Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
requirePost($c);
|
||||
|
||||
txn_do($c->model('DB')->schema, sub {
|
||||
updateUser($c, $c->stash->{user}) ;
|
||||
});
|
||||
$c->res->redirect("/admin/users");
|
||||
}
|
||||
|
||||
sub sendemail {
    my ($to, $subject, $body) = @_;

    my $url = hostname_long;
    my $sender = ($ENV{'USER'} || "hydra") . "@" . $url;

    my $email = Email::Simple->create(
        header => [
            To => $to,
            From => "Hydra <$sender>",
            Subject => $subject
        ],
        body => $body
    );

    sendmail($email);
}


sub reset_password : Chained('user') PathPart('reset-password') Args(0) {
    my ($self, $c) = @_;

    my %config = new Config::General(getHydraConf)->getall;

    # generate password
    my $password = Crypt::RandPasswd->word(8,10);

    # calculate hash
    my $hashed = sha1_hex($password);

    $c->stash->{user}->update({ password => $hashed }) ;

    # send email

    sendemail(
        $c->stash->{user}->emailaddress,
        "New password for Hydra",
        "Hi,\n\n".
        "Your password has been reset. Your new password is '$password'.\n".
        "You can change your password at http://".$config{'base_uri'}."/change-password .\n".
        "With regards, Hydra\n"
    );

    $c->res->redirect("/admin/users");
}


sub machines : Chained('admin') PathPart('machines') Args(0) {
    my ($self, $c) = @_;
    $c->stash->{machines} = getMachines;
    $c->stash->{machines} = [$c->model('DB::BuildMachines')->search({}, {order_by => "hostname"})];
    $c->stash->{systems} = [$c->model('DB::SystemTypes')->search({}, {select => ["system"], order_by => "system" })];
    $c->stash->{nixMachines} = nixMachines($c);
    $c->stash->{nixMachinesWritable} = (-e "/etc/nix.machines" && -w "/etc/nix.machines");

    $c->stash->{template} = 'machines.tt';
}


sub machine : Chained('admin') PathPart('machine') CaptureArgs(1) {
    my ($self, $c, $machineName) = @_;

    requireAdmin($c);

    my $machine = $c->model('DB::BuildMachines')->find($machineName)
        or notFound($c, "Machine $machineName doesn't exist.");

    $c->stash->{machine} = $machine;
}


sub clear_queue_non_current : Chained('admin') PathPart('clear-queue-non-current') Args(0) {
sub machine_edit : Chained('machine') PathPart('edit') Args(0) {
    my ($self, $c) = @_;
    my $builds = $c->model('DB::Builds')->search_rs(
        { id => { -in => \ "select id from Builds where id in ((select id from Builds where finished = 0) except (select build from JobsetEvalMembers where eval in (select max(id) from JobsetEvals where hasNewBuilds = 1 group by jobset_id)))" }
    $c->stash->{template} = 'machine.tt';
    $c->stash->{systemtypes} = [$c->model('DB::SystemTypes')->search({}, {order_by => "system"})];
    $c->stash->{edit} = 1;
}


sub machine_edit_submit : Chained('machine') PathPart('submit') Args(0) {
    my ($self, $c) = @_;
    requirePost($c);

    txn_do($c->model('DB')->schema, sub {
        updateMachine($c, $c->stash->{machine}) ;
    });
    saveNixMachines($c);
    $c->res->redirect("/admin/machines");
}

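# Validate the machine form fields, update the BuildMachines row, and
# rebuild the machine's supported system types from the "systems"
# parameter (a single value or an array ref). Callers run this inside a
# transaction and then call saveNixMachines().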
sub updateMachine {
    my ($c, $machine) = @_;

    my $hostname = trim $c->request->params->{"hostname"};
    my $username = trim $c->request->params->{"username"};
    my $maxconcurrent = trim $c->request->params->{"maxconcurrent"};
    my $speedfactor = trim $c->request->params->{"speedfactor"};
    my $ssh_key = trim $c->request->params->{"ssh_key"};
    my $options = trim $c->request->params->{"options"};
    my $systems = $c->request->params->{"systems"} ;

    error($c, "Invalid or empty username.") if $username eq "";
    error($c, "Max concurrent builds should be an integer > 0.") if $maxconcurrent eq "" || ! $maxconcurrent =~ m/[0-9]+/;
    error($c, "Speed factor should be an integer > 0.") if $speedfactor eq "" || ! $speedfactor =~ m/[0-9]+/;
    error($c, "Invalid or empty SSH key.") if $ssh_key eq "";

    $machine->update(
        { username => $username
        , maxconcurrent => $maxconcurrent
        , speedfactor => $speedfactor
        , ssh_key => $ssh_key
        , options => $options
        });
    my $n = cancelBuilds($c->model('DB')->schema, $builds);
    $c->flash->{successMsg} = "$n builds have been cancelled.";
    $c->res->redirect($c->request->referer // "/");
    $machine->buildmachinesystemtypes->delete_all;
    if (ref($systems) eq 'ARRAY') {
        for my $s (@$systems) {
            $machine->buildmachinesystemtypes->create({ system => $s }) ;
        }
    } else {
        $machine->buildmachinesystemtypes->create({ system => $systems }) ;
    }
}


sub clearfailedcache : Chained('admin') PathPart('clear-failed-cache') Args(0) {
sub create_machine : Chained('admin') PathPart('create-machine') Args(0) {
    my ($self, $c) = @_;
    $c->model('DB::FailedPaths')->delete;
    $c->res->redirect($c->request->referer // "/");

    requireAdmin($c);

    $c->stash->{template} = 'machine.tt';
    $c->stash->{systemtypes} = [$c->model('DB::SystemTypes')->search({}, {order_by => "system"})];
    $c->stash->{edit} = 1;
    $c->stash->{create} = 1;
}


sub clearvcscache : Chained('admin') PathPart('clear-vcs-cache') Args(0) {
sub create_machine_submit : Chained('admin') PathPart('create-machine/submit') Args(0) {
    my ($self, $c) = @_;
    $c->model('DB::CachedPathInputs')->delete;
    $c->model('DB::CachedGitInputs')->delete;
    $c->model('DB::CachedSubversionInputs')->delete;
    $c->model('DB::CachedBazaarInputs')->delete;
    $c->flash->{successMsg} = "VCS caches have been cleared.";
    $c->res->redirect($c->request->referer // "/");

    requireAdmin($c);

    my $hostname = trim $c->request->params->{"hostname"};
    error($c, "Invalid or empty hostname.") if $hostname eq "";

    txn_do($c->model('DB')->schema, sub {
        my $machine = $c->model('DB::BuildMachines')->create(
            { hostname => $hostname });
        updateMachine($c, $machine);
    });
    saveNixMachines($c);
    $c->res->redirect("/admin/machines");
}


sub machine_delete : Chained('machine') PathPart('delete') Args(0) {
    my ($self, $c) = @_;
    requirePost($c);

sub managenews : Chained('admin') PathPart('news') Args(0) {
    txn_do($c->model('DB')->schema, sub {
        $c->stash->{machine}->delete;
    });
    saveNixMachines($c);
    $c->res->redirect("/admin/machines");
}


sub machine_enable : Chained('machine') PathPart('enable') Args(0) {
    my ($self, $c) = @_;
    $c->stash->{machine}->update({ enabled => 1 });
    saveNixMachines($c);
    $c->res->redirect("/admin/machines");
}


sub machine_disable : Chained('machine') PathPart('disable') Args(0) {
    my ($self, $c) = @_;
    $c->stash->{machine}->update({ enabled => 0 });
    saveNixMachines($c);
    $c->res->redirect("/admin/machines");
}


sub clear_queue_non_current : Chained('admin') Path('clear-queue-non-current') Args(0) {
    my ($self, $c) = @_;
    # !!! Mark the builds as cancelled instead.
    $c->model('DB::Builds')->search({finished => 0, iscurrent => 0, busy => 0})->delete_all;
    $c->res->redirect("/admin");
}


sub clear_queue : Chained('admin') Path('clear-queue') Args(0) {
    my ($self, $c) = @_;
    # !!! Mark the builds as cancelled instead.
    $c->model('DB::Builds')->search({finished => 0, busy => 0})->delete_all;
    $c->res->redirect("/admin");
}


sub clearfailedcache : Chained('admin') Path('clear-failed-cache') Args(0) {
    my ($self, $c) = @_;

    my $r = `nix-store --clear-failed-paths '*'`;

    $c->res->redirect("/admin");
}


sub clearvcscache : Chained('admin') Path('clear-vcs-cache') Args(0) {
    my ($self, $c) = @_;

    print "Clearing path cache\n";
    $c->model('DB::CachedPathInputs')->delete_all;

    print "Clearing git cache\n";
    $c->model('DB::CachedGitInputs')->delete_all;

    print "Clearing subversion cache\n";
    $c->model('DB::CachedSubversionInputs')->delete_all;

    print "Clearing bazaar cache\n";
    $c->model('DB::CachedBazaarInputs')->delete_all;

    $c->res->redirect("/admin");
}


sub managenews : Chained('admin') Path('news') Args(0) {
    my ($self, $c) = @_;

    $c->stash->{newsItems} = [$c->model('DB::NewsItems')->search({}, {order_by => 'createtime DESC'})];
@@ -67,8 +342,7 @@ sub managenews : Chained('admin') PathPart('news') Args(0) {
    $c->stash->{template} = 'news.tt';
}


sub news_submit : Chained('admin') PathPart('news/submit') Args(0) {
sub news_submit : Chained('admin') Path('news/submit') Args(0) {
    my ($self, $c) = @_;

    requirePost($c);
@@ -85,11 +359,10 @@ sub news_submit : Chained('admin') PathPart('news/submit') Args(0) {
    $c->res->redirect("/admin/news");
}


sub news_delete : Chained('admin') PathPart('news/delete') Args(1) {
sub news_delete : Chained('admin') Path('news/delete') Args(1) {
    my ($self, $c, $id) = @_;

    $c->model('DB')->schema->txn_do(sub {
    txn_do($c->model('DB')->schema, sub {
        my $newsItem = $c->model('DB::NewsItems')->find($id)
            or notFound($c, "Newsitem with id $id doesn't exist.");
        $newsItem->delete;
@@ -98,5 +371,20 @@ sub news_delete : Chained('admin') PathPart('news/delete') Args(1) {
    $c->res->redirect("/admin/news");
}

sub force_eval : Chained('admin') Path('eval') Args(2) {
    my ($self, $c, $projectName, $jobsetName) = @_;

    my $project = $c->model('DB::Projects')->find($projectName)
        or notFound($c, "Project $projectName doesn't exist.");

    $c->stash->{project} = $project;
    $c->stash->{jobset_} = $project->jobsets->search({name => $jobsetName});
    $c->stash->{jobset} = $c->stash->{jobset_}->single
        or notFound($c, "Jobset $jobsetName doesn't exist.");

    (my $res, my $stdout, my $stderr) = captureStdoutStderr(60, ("hydra-evaluator", $projectName, $jobsetName));

    $c->res->redirect("/project/$projectName");
}

1;

@@ -1,139 +1,96 @@
package Hydra::Controller::Build;

use utf8;
use strict;
use warnings;
use base 'Hydra::Base::Controller::NixChannel';
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
use File::Basename;
use File::LibMagic;
use Hydra::Helper::AddBuilds;
use File::stat;
use Data::Dump qw(dump);
use List::SomeUtils qw(all);
use Encode;
use JSON::PP;
use IPC::Run qw(run);
use IPC::Run3;
use WWW::Form::UrlEncoded::PP qw();
use Nix::Store;

use feature 'state';

sub buildChain :Chained('/') :PathPart('build') :CaptureArgs(1) {
sub build : Chained('/') PathPart CaptureArgs(1) {
    my ($self, $c, $id) = @_;

    $id =~ /^[0-9]+$/ or error($c, "Invalid build ID ‘$id’.");

    $c->stash->{id} = $id;

    $c->stash->{build} = getBuild($c, $id);

    notFound($c, "Build with ID $id doesn't exist.")
        if !defined $c->stash->{build};

    $c->stash->{prevBuild} = getPreviousBuild($c, $c->stash->{build});
    $c->stash->{prevSuccessfulBuild} = getPreviousSuccessfulBuild($c, $c->stash->{build});
    $c->stash->{firstBrokenBuild} = getNextBuild($c, $c->stash->{prevSuccessfulBuild});

    $c->stash->{mappers} = [$c->model('DB::UriRevMapper')->all];

    $c->stash->{project} = $c->stash->{build}->project;
    $c->stash->{jobset} = $c->stash->{build}->jobset;
    $c->stash->{job} = $c->stash->{build}->job;
    $c->stash->{runcommandlogs} = [$c->stash->{build}->runcommandlogs->search({}, {order_by => ["id DESC"]})];

    $c->stash->{runcommandlogProblem} = undef;
    if ($c->stash->{job} =~ qr/^runCommandHook\..*/) {
        if (!$c->config->{dynamicruncommand}->{enable}) {
            $c->stash->{runcommandlogProblem} = "disabled-server";
        } elsif (!$c->stash->{project}->enable_dynamic_run_command) {
            $c->stash->{runcommandlogProblem} = "disabled-project";
        } elsif (!$c->stash->{jobset}->enable_dynamic_run_command) {
            $c->stash->{runcommandlogProblem} = "disabled-jobset";
        }
    }
}


sub findBuildStepByOutPath {
    my ($self, $c, $path) = @_;
    return $c->model('DB::BuildSteps')->search(
        { path => $path, busy => 0 },
        { join => ["buildstepoutputs"], order_by => ["status", "stopTime"], rows => 1 })->single;
}


sub findBuildStepByDrvPath {
    my ($self, $c, $drvPath) = @_;
    return $c->model('DB::BuildSteps')->search(
        { drvpath => $drvPath, busy => 0 },
        { order_by => ["status", "stopTime"], rows => 1 })->single;
}


sub build :Chained('buildChain') :PathPart('') :Args(0) :ActionClass('REST') { }

sub build_GET {
sub view_build : Chained('build') PathPart('') Args(0) {
    my ($self, $c) = @_;

    my $build = $c->stash->{build};

    $c->stash->{template} = 'build.tt';
    $c->stash->{isLocalStore} = isLocalStore();
    # XXX: If the derivation is content-addressed then this will always return
    # false because `$_->path` will be empty
    $c->stash->{available} =
        $c->stash->{isLocalStore}
        ? all { $_->path && $MACHINE_LOCAL_STORE->isValidPath($_->path) } $build->buildoutputs->all
        : 1;
    $c->stash->{drvAvailable} = $MACHINE_LOCAL_STORE->isValidPath($build->drvpath);
    $c->stash->{available} = isValidPath $build->outpath;
    $c->stash->{drvAvailable} = isValidPath $build->drvpath;
    $c->stash->{flashMsg} = $c->flash->{buildMsg};

    $c->stash->{pathHash} = $c->stash->{available} ? queryPathHash($build->outpath) : undef;

    if (!$build->finished && $build->busy) {
        my $logfile = $build->logfile;
        $c->stash->{logtext} = `cat $logfile` if defined $logfile && -e $logfile;
    }

    if ($build->finished && $build->iscachedbuild) {
        my $path = ($build->buildoutputs)[0]->path or undef;
        my $cachedBuildStep = findBuildStepByOutPath($self, $c, $path);
        if (defined $cachedBuildStep) {
            $c->stash->{cachedBuild} = $cachedBuildStep->build;
            $c->stash->{cachedBuildStep} = $cachedBuildStep;
        }
        (my $cachedBuildStep) = $c->model('DB::BuildSteps')->search({ outpath => $build->outpath }, {}) ;
        $c->stash->{cachedBuild} = $cachedBuildStep->build if defined $cachedBuildStep;
    }

    (my $lastBuildStep) = $build->buildsteps->search({},{order_by => "stepnr DESC", rows => 1});
    my $path = defined $lastBuildStep ? $lastBuildStep->logfile : "" ;
    if ($build->finished && ($build->buildstatus == 1 || $build->buildstatus == 6) && !($path eq "") && -f $lastBuildStep->logfile) {
        my $logtext = `tail -n 50 $path`;
        $c->stash->{logtext} = removeAsciiEscapes($logtext);
    }

    # Get the first eval of which this build was a part.
    ($c->stash->{nrEvals}) = $build->jobsetevals->search({ hasnewbuilds => 1 })->count;
    $c->stash->{eval} = getFirstEval($build);
    $self->status_ok(
        $c,
        entity => $build
    );

    if (defined $c->stash->{eval}) {
        my ($eval2) = $c->stash->{eval}->jobset->jobsetevals->search(
            { hasnewbuilds => 1, id => { '<', $c->stash->{eval}->id } },
            { order_by => "id DESC", rows => 1 });
        $c->stash->{otherEval} = $eval2 if defined $eval2;
    if ($build->finished) {
        $c->stash->{prevBuilds} = [$c->model('DB::Builds')->search(
            { project => $c->stash->{project}->name
            , jobset => $c->stash->{build}->jobset->name
            , job => $c->stash->{build}->job->name
            , 'me.system' => $build->system
            , finished => 1
            , buildstatus => 0
            , 'me.id' => { '<=' => $build->id }
            }
            , { join => "actualBuildStep"
              , "+select" => ["actualBuildStep.stoptime - actualBuildStep.starttime"]
              , "+as" => ["actualBuildTime"]
              , order_by => "me.id DESC"
              , rows => 50
              }
        )
        ];
    }

    # If this is an aggregate build, get its constituents.
    $c->stash->{constituents} = [$build->constituents_->search({}, {order_by => ["job"]})];

    $c->stash->{steps} = [$build->buildsteps->search({}, {order_by => "stepnr desc"})];

    $c->stash->{binaryCachePublicUri} = $c->config->{binary_cache_public_uri};
}

sub constituents :Chained('buildChain') :PathPart('constituents') :Args(0) :ActionClass('REST') { }

sub constituents_GET {
    my ($self, $c) = @_;

    my $build = $c->stash->{build};

    $self->status_ok(
        $c,
        entity => [$build->constituents_->search({}, {order_by => ["job"]})]
    my $maxRelated = 100;
    my $r = $c->model('DB::Builds')->search(
        { eval => { -in => $build->jobsetevalmembers->search({}, {rows => 1})->get_column('eval')->as_query } },
        { join => 'jobsetevalmembers', order_by => [ 'project', 'jobset', 'job'], distinct => 1, rows => $maxRelated + 1 }
    );
    $c->stash->{relatedbuilds} = [$r->all];
    delete $c->stash->{relatedbuilds} if scalar(@{$c->stash->{relatedbuilds}}) > $maxRelated;
}


sub view_nixlog : Chained('buildChain') PathPart('nixlog') {
sub view_nixlog : Chained('build') PathPart('nixlog') {
    my ($self, $c, $stepnr, $mode) = @_;

    my $step = $c->stash->{build}->buildsteps->find({stepnr => $stepnr});
@@ -141,53 +98,61 @@ sub view_nixlog : Chained('buildChain') PathPart('nixlog') {

    $c->stash->{step} = $step;

    my $drvPath = $step->drvpath;
    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
    showLog($c, $mode, $log_uri);
    showLog($c, $step->logfile, $mode);
}


sub view_log : Chained('buildChain') PathPart('log') {
sub view_log : Chained('build') PathPart('log') {
    my ($self, $c, $mode) = @_;

    my $drvPath = $c->stash->{build}->drvpath;
    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
    showLog($c, $mode, $log_uri);
}
    error($c, "Build didn't produce a log.") if !defined $c->stash->{build}->logfile;


sub view_runcommandlog : Chained('buildChain') PathPart('runcommandlog') {
    my ($self, $c, $uuid, $mode) = @_;

    my $log_uri = $c->uri_for($c->controller('Root')->action_for("runcommandlog"), $uuid);
    showLog($c, $mode, $log_uri);
    $c->stash->{template} = 'runcommand-log.tt';
    $c->stash->{runcommandlog} = $c->stash->{build}->runcommandlogs->find({ uuid => $uuid });
    showLog($c, $c->stash->{build}->logfile, $mode);
}

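# Render a build log in one of several display modes: "pretty" (HTML via
# the nix-log2xml/xsltproc pipeline), "raw", "tail" (last 50 lines) and
# "tail-reload" (a tail that keeps refreshing while the build is busy).
# Both plain and bzip2-compressed log files are handled.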
sub showLog {
    my ($c, $mode, $log_uri) = @_;
    $mode //= "pretty";
    my ($c, $path, $mode) = @_;

    my $fallbackpath = -f $path ? $path : "$path.bz2";

    notFound($c, "Log file $path no longer exists.") unless -f $fallbackpath;
    $path = $fallbackpath;

    my $pipestart = ($path =~ /.bz2$/ ? "cat $path | bzip2 -d" : "cat $path") ;

    if (!$mode) {
        # !!! quick hack
        my $pipeline = $pipestart
            . " | nix-log2xml | xsltproc " . $c->path_to("xsl/mark-errors.xsl") . " -"
            . " | xsltproc " . $c->path_to("xsl/log2html.xsl") . " - | tail -n +2";

    if ($mode eq "pretty") {
        $c->stash->{log_uri} = $log_uri;
        $c->stash->{template} = 'log.tt';
        $c->stash->{logtext} = `$pipeline`;
    }

    elsif ($mode eq "raw") {
        $c->res->redirect($log_uri);
        $c->stash->{'plain'} = { data => (scalar `$pipestart`) || " " };
        $c->forward('Hydra::View::Plain');
    }

    elsif ($mode eq "tail-reload") {
        my $url = $c->request->uri->as_string;
        $url =~ s/tail-reload/tail/g;
        $c->stash->{url} = $url;
        $c->stash->{reload} = !$c->stash->{build}->finished && $c->stash->{build}->busy;
        $c->stash->{title} = "";
        $c->stash->{contents} = (scalar `$pipestart | tail -n 50`) || " ";
        $c->stash->{template} = 'plain-reload.tt';
    }

    elsif ($mode eq "tail") {
        my $lines = 50;
        $c->stash->{log_uri} = $log_uri . "?tail=$lines";
        $c->stash->{tail} = $lines;
        $c->stash->{template} = 'log.tt';
        $c->stash->{'plain'} = { data => (scalar `$pipestart | tail -n 50`) || " " };
        $c->forward('Hydra::View::Plain');
    }

    else {
        error($c, "Unknown log display mode '$mode'.");
        error($c, "Unknown log display mode `$mode'.");
    }
}

@@ -196,134 +161,56 @@ sub defaultUriForProduct {
    my ($self, $c, $product, @path) = @_;
    my $x = $product->productnr
        . ($product->name ? "/" . $product->name : "")
        . ($product->defaultpath ? "/" . $product->defaultpath : "");
        . ($product->defaultpath ? "/" . $product->defaultpath : "");
    return $c->uri_for($self->action_for("download"), $c->req->captures, (split /\//, $x), @path);
}


sub checkPath {
    my ($self, $c, $path) = @_;
    my $p = pathIsInsidePrefix($path, $Nix::Config::storeDir);
    error($c, "Build product refers outside of the Nix store.") unless defined $p;
    return $p;
}
sub download : Chained('build') PathPart {
    my ($self, $c, $productnr, @path) = @_;

    $productnr = 1 if !defined $productnr;

sub serveFile {
    my ($c, $path) = @_;
    my $product = $c->stash->{build}->buildproducts->find({productnr => $productnr});
    notFound($c, "Build doesn't have a product #$productnr.") if !defined $product;

    my $res = runCommand(cmd => ["nix", "--experimental-features", "nix-command",
        "ls-store", "--store", getStoreUri(), "--json", "$path"]);

    if ($res->{status}) {
        notFound($c, "File '$path' does not exist.") if $res->{stderr} =~ /does not exist/;
        die "$res->{stderr}\n";
    }

    my $ls = decode_json($res->{stdout});

    if ($ls->{type} eq "directory" && substr($c->request->uri, -1) ne "/") {
        return $c->res->redirect($c->request->uri . "/");
    }

    elsif ($ls->{type} eq "directory" && defined $ls->{entries}->{"index.html"}) {
        return serveFile($c, "$path/index.html");
    }

    elsif ($ls->{type} eq "symlink") {
        my $target = $ls->{target};
        return serveFile($c, substr($target, 0, 1) eq "/" ? $target : dirname($path) . "/" . $target);
    }

    elsif ($ls->{type} eq "regular") {
        # Have the hosted data considered its own origin to avoid being a giant
        # XSS hole.
        $c->response->header('Content-Security-Policy' => 'sandbox allow-scripts');

        $c->stash->{'plain'} = { data => readIntoSocket(cmd => ["nix", "--experimental-features", "nix-command",
            "store", "cat", "--store", getStoreUri(), "$path"]) };

        # Detect MIME type.
        my $type = "text/plain";
        if ($path =~ /.*\.(\S{1,})$/xms) {
            my $ext = $1;
            my $mimeTypes = MIME::Types->new(only_complete => 1);
            my $t = $mimeTypes->mimeTypeOf($ext);
            $type = ref $t ? $t->type : $t if $t;
        } else {
            state $magic = File::LibMagic->new(follow_symlinks => 1);
            my $info = $magic->info_from_filename($path);
            $type = $info->{mime_with_encoding};
        }
        $c->response->content_type($type);
        $c->forward('Hydra::View::Plain');
    }

    else {
        error($c, "Do not know how to serve path '$path'.");
    }
}


sub download : Chained('buildChain') PathPart {
    my ($self, $c, $productRef, @path) = @_;

    $productRef = 1 if !defined $productRef;

    my $product;
    if ($productRef =~ /^[0-9]+$/) {
        $product = $c->stash->{build}->buildproducts->find({productnr => $productRef});
    } else {
        $product = $c->stash->{build}->buildproducts->find({name => $productRef});
        @path = ($productRef, @path);
    }
    notFound($c, "Build doesn't have a product $productRef.") if !defined $product;

    if ($product->path !~ /^($Nix::Config::storeDir\/[^\/]+)/) {
        die "Invalid store path '" . $product->path . "'.\n";
    }
    my $storePath = $1;
    notFound($c, "Product " . $product->path . " has disappeared.") unless -e $product->path;

    return $c->res->redirect(defaultUriForProduct($self, $c, $product, @path))
        if scalar @path == 0 && ($product->name || $product->defaultpath);

    # If the product has a name, then the first path element can be
    # ignored (it's the name included in the URL for informational purposes).
    shift @path if $product->name;

    shift @path if $product->name;

    # Security paranoia.
    foreach my $elem (@path) {
        error($c, "Invalid filename '$elem'.") if $elem !~ /^$pathCompRE$/;
        error($c, "Invalid filename $elem.") if $elem !~ /^$pathCompRE$/;
    }

    my $path = $product->path;
    $path .= "/" . join("/", @path) if scalar @path > 0;

    serveFile($c, $path);
    # If this is a directory but no "/" is attached, then redirect.
    if (-d $path && substr($c->request->uri, -1) ne "/") {
        return $c->res->redirect($c->request->uri . "/");
    }

    $path = "$path/index.html" if -d $path && -e "$path/index.html";

    $c->response->headers->last_modified($c->stash->{build}->stoptime);
}
    notFound($c, "File $path does not exist.") if !-e $path;

    notFound($c, "Path $path is a directory.") if -d $path;

sub output : Chained('buildChain') PathPart Args(1) {
    my ($self, $c, $outputName) = @_;
    my $build = $c->stash->{build};

    error($c, "This build is not finished yet.") unless $build->finished;
    my $output = $build->buildoutputs->find({name => $outputName});
    notFound($c, "This build has no output named ‘$outputName’") unless defined $output;
    gone($c, "Output is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($output->path);

    $c->response->header('Content-Disposition', "attachment; filename=\"build-${\$build->id}-${\$outputName}.nar.bz2\"");
    $c->stash->{current_view} = 'NixNAR';
    $c->stash->{storePath} = $output->path;
    $c->serve_static_file($path);
    $c->response->headers->last_modified($c->stash->{build}->timestamp);
}


# Redirect to a download with the given type. Useful when you want to
# link to some build product of the latest build (i.e. in conjunction
# with the .../latest redirect).
sub download_by_type : Chained('buildChain') PathPart('download-by-type') {
sub download_by_type : Chained('build') PathPart('download-by-type') {
    my ($self, $c, $type, $subtype, @path) = @_;

    notFound($c, "You need to specify a type and a subtype in the URI.")
@@ -338,93 +225,56 @@ sub download_by_type : Chained('buildChain') PathPart('download-by-type') {
}


sub contents : Chained('buildChain') PathPart Args(1) {
sub contents : Chained('build') PathPart Args(1) {
    my ($self, $c, $productnr) = @_;

    my $product = $c->stash->{build}->buildproducts->find({productnr => $productnr});
    notFound($c, "Build doesn't have a product $productnr.") if !defined $product;

    my $path = $product->path;

    $path = checkPath($self, $c, $path);

    notFound($c, "Product $path has disappeared.") unless -e $path;

    # FIXME: use nix store cat

    my $res;

    if ($product->type eq "nix-build" && -d $path) {
        # FIXME: use nix ls-store -R --json
        # We need to use a pipe between find and xargs, so we'll use IPC::Run
        my $error;
        # Run find with absolute path and post-process to get relative paths
        my $success = run(['find', $path, '-print0'], '|', ['xargs', '-0', 'ls', '-ld', '--'], \$res, \$error);
        error($c, "`find $path -print0 | xargs -0 ls -ld --' error: $error") unless $success;

        # Strip the base path to show relative paths
        my $escaped_path = quotemeta($path);
        $res =~ s/^(.*\s)$escaped_path(\/|$)/$1.$2/mg;

        #my $baseuri = $c->uri_for('/build', $c->stash->{build}->id, 'download', $product->productnr);
        #$baseuri .= "/".$product->name if $product->name;
        #$res =~ s/(\.\/)($relPathRE)/<a href="$baseuri\/$2">$1$2<\/a>/g;
        $res = `cd $path && find . -print0 | xargs -0 ls -ld --`;
        error($c, "`ls -lR' error: $?") if $? != 0;

        my $baseuri = $c->uri_for('/build', $c->stash->{build}->id, 'download', $product->productnr);
        $baseuri .= "/".$product->name if $product->name;
        $res =~ s/(\.\/)($relPathRE)/<a href="$baseuri\/$2">$1$2<\/a>/g;
    }

    elsif ($path =~ /\.rpm$/) {
        my ($stdout1, $stderr1);
        run3(['rpm', '--query', '--info', '--package', $path], \undef, \$stdout1, \$stderr1);
        error($c, "RPM error: $stderr1") if $? != 0;
        $res = $stdout1;

        $res = `rpm --query --info --package "$path"`;
        error($c, "RPM error: $?") if $? != 0;
        $res .= "===\n";

        my ($stdout2, $stderr2);
        run3(['rpm', '--query', '--list', '--verbose', '--package', $path], \undef, \$stdout2, \$stderr2);
        error($c, "RPM error: $stderr2") if $? != 0;
        $res .= $stdout2;
        $res .= `rpm --query --list --verbose --package "$path"`;
        error($c, "RPM error: $?") if $? != 0;
    }

    elsif ($path =~ /\.deb$/) {
        my ($stdout1, $stderr1);
        run3(['dpkg-deb', '--info', $path], \undef, \$stdout1, \$stderr1);
        error($c, "`dpkg-deb' error: $stderr1") if $? != 0;
        $res = $stdout1;

        $res = `dpkg-deb --info "$path"`;
        error($c, "`dpkg-deb' error: $?") if $? != 0;
        $res .= "===\n";

        my ($stdout2, $stderr2);
        run3(['dpkg-deb', '--contents', $path], \undef, \$stdout2, \$stderr2);
        error($c, "`dpkg-deb' error: $stderr2") if $? != 0;
        $res .= $stdout2;
        $res .= `dpkg-deb --contents "$path"`;
        error($c, "`dpkg-deb' error: $?") if $? != 0;
    }

    elsif ($path =~ /\.(tar(\.gz|\.bz2|\.xz|\.lzma)?|tgz)$/ ) {
        my ($stdout, $stderr);
        run3(['tar', 'tvfa', $path], \undef, \$stdout, \$stderr);
        error($c, "`tar' error: $stderr") if $? != 0;
        $res = $stdout;
        $res = `tar tvfa "$path"`;
        error($c, "`tar' error: $?") if $? != 0;
    }

    elsif ($path =~ /\.(zip|jar)$/ ) {
        my ($stdout, $stderr);
        run3(['unzip', '-v', $path], \undef, \$stdout, \$stderr);
        error($c, "`unzip' error: $stderr") if $? != 0;
        $res = $stdout;
        $res = `unzip -v "$path"`;
        error($c, "`unzip' error: $?") if $? != 0;
    }

    elsif ($path =~ /\.iso$/ ) {
        # Run first isoinfo command
        my ($stdout1, $stderr1);
        run3(['isoinfo', '-d', '-i', $path], \undef, \$stdout1, \$stderr1);
        error($c, "`isoinfo' error: $stderr1") if $? != 0;
        $res = $stdout1;

        # Run second isoinfo command
        my ($stdout2, $stderr2);
        run3(['isoinfo', '-l', '-R', '-i', $path], \undef, \$stdout2, \$stderr2);
        error($c, "`isoinfo' error: $stderr2") if $? != 0;
        $res .= $stdout2;
        $res = `isoinfo -d -i "$path" && isoinfo -l -R -i "$path"`;
        error($c, "`isoinfo' error: $?") if $? != 0;
    }

    else {
@@ -432,211 +282,275 @@ sub contents : Chained('buildChain') PathPart Args(1) {
    }

    die unless $res;

    $c->stash->{title} = "Contents of ".$product->path;
    $c->stash->{contents} = decode("utf-8", $res);
    $c->stash->{contents} = "<pre>$res</pre>";
    $c->stash->{template} = 'plain.tt';
}

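# Recursively build a dependency-graph node for a store path. $done
# memoizes one node per path so shared dependencies are visited only
# once; $runtime selects between runtime references and build-time
# (.drv) references.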
sub getDependencyGraph {
    my ($self, $c, $runtime, $done, $path) = @_;
    my $node = $$done{$path};
sub runtimedeps : Chained('build') PathPart('runtime-deps') {
    my ($self, $c) = @_;

    my $build = $c->stash->{build};

    notFound($c, "Path " . $build->outpath . " is no longer available.")
        unless isValidPath($build->outpath);

    $c->stash->{current_view} = 'NixDepGraph';
    $c->stash->{storePaths} = [$build->outpath];

    $c->res->content_type('image/png'); # !!!
}

    if (!defined $node) {
        $path =~ /\/[a-z0-9]+-(.*)$/;
        my $name = $1 // $path;
        $name =~ s/\.drv$//;
        $node =
            { path => $path
            , name => $name
            , buildStep => $runtime
                ? findBuildStepByOutPath($self, $c, $path)
                : findBuildStepByDrvPath($self, $c, $path)
            };
        $$done{$path} = $node;
        my @refs;
        foreach my $ref ($MACHINE_LOCAL_STORE->queryReferences($path)) {
            next if $ref eq $path;
            next unless $runtime || $ref =~ /\.drv$/;
            getDependencyGraph($self, $c, $runtime, $done, $ref);
            push @refs, $ref;
        }
        # Show in reverse topological order to flatten the graph.
        # Should probably do a proper BFS.
        my @sorted = reverse $MACHINE_LOCAL_STORE->topoSortPaths(@refs);
        $node->{refs} = [map { $$done{$_} } @sorted];

sub buildtimedeps : Chained('build') PathPart('buildtime-deps') {
    my ($self, $c) = @_;

    my $build = $c->stash->{build};

    notFound($c, "Path " . $build->drvpath . " is no longer available.")
        unless isValidPath($build->drvpath);

    $c->stash->{current_view} = 'NixDepGraph';
    $c->stash->{storePaths} = [$build->drvpath];

    $c->res->content_type('image/png'); # !!!
}


sub deps : Chained('build') PathPart('deps') {
    my ($self, $c) = @_;

    my $build = $c->stash->{build};
    $c->stash->{available} = isValidPath $build->outpath;
    $c->stash->{drvAvailable} = isValidPath $build->drvpath;

    my $drvpath = $build->drvpath;
    my $outpath = $build->outpath;

    my @buildtimepaths = ();
    my @buildtimedeps = ();
    @buildtimepaths = split '\n', `nix-store --query --requisites --include-outputs $drvpath` if isValidPath($build->drvpath);

    my @runtimepaths = ();
    my @runtimedeps = ();
    @runtimepaths = split '\n', `nix-store --query --requisites --include-outputs $outpath` if isValidPath($build->outpath);

    foreach my $p (@buildtimepaths) {
        my $buildStep;
        ($buildStep) = $c->model('DB::BuildSteps')->search({ outpath => $p }, {}) ;
        my %dep = ( buildstep => $buildStep, path => $p ) ;
        push(@buildtimedeps, \%dep);
    }

    return $node;
    foreach my $p (@runtimepaths) {
        my $buildStep;
        ($buildStep) = $c->model('DB::BuildSteps')->search({ outpath => $p }, {}) ;
        my %dep = ( buildstep => $buildStep, path => $p ) ;
        push(@runtimedeps, \%dep);
    }

    $c->stash->{buildtimedeps} = \@buildtimedeps;
    $c->stash->{runtimedeps} = \@runtimedeps;

    $c->stash->{template} = 'deps.tt';
}


sub build_deps : Chained('buildChain') PathPart('build-deps') {
    my ($self, $c) = @_;
    my $build = $c->stash->{build};
    my $drvPath = $build->drvpath;

    error($c, "Derivation no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($drvPath);

    $c->stash->{buildTimeGraph} = getDependencyGraph($self, $c, 0, {}, $drvPath);

    $c->stash->{template} = 'build-deps.tt';
}


sub runtime_deps : Chained('buildChain') PathPart('runtime-deps') {
    my ($self, $c) = @_;
    my $build = $c->stash->{build};
    my @outPaths = map { $_->path } $build->buildoutputs->all;

    requireLocalStore($c);

    error($c, "Build outputs no longer available.") unless all { $MACHINE_LOCAL_STORE->isValidPath($_) } @outPaths;

    my $done = {};
    $c->stash->{runtimeGraph} = [ map { getDependencyGraph($self, $c, 1, $done, $_) } @outPaths ];

    $c->stash->{template} = 'runtime-deps.tt';
}


sub nix : Chained('buildChain') PathPart('nix') CaptureArgs(0) {
sub nix : Chained('build') PathPart('nix') CaptureArgs(0) {
    my ($self, $c) = @_;

    my $build = $c->stash->{build};

    notFound($c, "Build cannot be downloaded as a closure or Nix package.")
        if $build->buildproducts->search({type => "nix-build"})->count == 0;
        if !$build->buildproducts->find({type => "nix-build"});

    if (isLocalStore) {
        foreach my $out ($build->buildoutputs) {
            notFound($c, "Path " . $out->path . " is no longer available.")
                unless $MACHINE_LOCAL_STORE->isValidPath($out->path);
        }
    }

    $c->stash->{channelBuilds} = $c->model('DB::Builds')->search(
        { id => $build->id },
        { join => ["buildoutputs"]
        , '+select' => ['buildoutputs.path', 'buildoutputs.name'], '+as' => ['outpath', 'outname'] });
    notFound($c, "Path " . $build->outpath . " is no longer available.")
        unless isValidPath($build->outpath);

    $c->stash->{channelBuilds} = $c->model('DB::Builds')->search({id => $build->id});
}


sub restart : Chained('buildChain') PathPart Args(0) {
sub restart : Chained('build') PathPart Args(0) {
    my ($self, $c) = @_;

    my $build = $c->stash->{build};
    requireRestartPrivileges($c, $build->project);
    my $n = restartBuilds($c->model('DB')->schema, $c->model('DB::Builds')->search_rs({ id => $build->id }));
    error($c, "This build cannot be restarted.") if $n != 1;
    $c->flash->{successMsg} = "Build has been restarted.";
    $c->res->redirect($c->uri_for($self->action_for("build"), $c->req->captures));

    requireProjectOwner($c, $build->project);

    my $drvpath = $build->drvpath ;
    error($c, "This build cannot be restarted.")
        unless $build->finished && -f $drvpath ;

    restartBuild($c->model('DB')->schema, $build);

    $c->flash->{buildMsg} = "Build has been restarted.";

    $c->res->redirect($c->uri_for($self->action_for("view_build"), $c->req->captures));
}


sub cancel : Chained('buildChain') PathPart Args(0) {
sub cancel : Chained('build') PathPart Args(0) {
    my ($self, $c) = @_;
    my $build = $c->stash->{build};
    requireCancelBuildPrivileges($c, $build->project);
    my $n = cancelBuilds($c->model('DB')->schema, $c->model('DB::Builds')->search_rs({ id => $build->id }));
    error($c, "This build cannot be cancelled.") if $n != 1;
    $c->flash->{successMsg} = "Build has been cancelled.";
    $c->res->redirect($c->uri_for($self->action_for("build"), $c->req->captures));
}


sub keep : Chained('buildChain') PathPart Args(1) {
    my ($self, $c, $x) = @_;
    my $keep = $x eq "1" ? 1 : 0;

    my $build = $c->stash->{build};

    requireProjectOwner($c, $build->project);

    if ($keep) {
        registerRoot $_->path foreach $build->buildoutputs;
    txn_do($c->model('DB')->schema, sub {
        error($c, "This build cannot be cancelled.")
            if $build->finished || $build->busy;

        # !!! Actually, it would be nice to be able to cancel busy
        # builds as well, but we would have to send a signal or
        # something to the build process.

        $build->update(
            { finished => 1, busy => 0, timestamp => time
            , iscachedbuild => 0, buildstatus => 4 # = cancelled
            });
    });

    $c->flash->{buildMsg} = "Build has been cancelled.";

    $c->res->redirect($c->uri_for($self->action_for("view_build"), $c->req->captures));
}


sub keep : Chained('build') PathPart Args(1) {
    my ($self, $c, $newStatus) = @_;

    my $build = $c->stash->{build};

    requireProjectOwner($c, $build->project);

    die unless $newStatus == 0 || $newStatus == 1;

    registerRoot $build->outpath if $newStatus == 1;

    txn_do($c->model('DB')->schema, sub {
        $build->update({keep => int $newStatus});
    });

    $c->flash->{buildMsg} =
        $newStatus == 0 ? "Build will not be kept." : "Build will be kept.";

    $c->res->redirect($c->uri_for($self->action_for("view_build"), $c->req->captures));
}


sub add_to_release : Chained('build') PathPart('add-to-release') Args(0) {
    my ($self, $c) = @_;

    my $build = $c->stash->{build};

    requireProjectOwner($c, $build->project);

    my $releaseName = trim $c->request->params->{name};

    my $release = $build->project->releases->find({name => $releaseName});

    error($c, "This project has no release named `$releaseName'.") unless $release;

    error($c, "This build is already a part of release `$releaseName'.")
        if $release->releasemembers->find({build => $build->id});

    registerRoot $build->outpath;

    error($c, "This build is no longer available.") unless isValidPath $build->outpath;

    $release->releasemembers->create({build => $build->id, description => $build->description});

    $c->flash->{buildMsg} = "Build added to project <tt>$releaseName</tt>.";

    $c->res->redirect($c->uri_for($self->action_for("view_build"), $c->req->captures));
}


sub clone : Chained('build') PathPart('clone') Args(0) {
    my ($self, $c) = @_;

    my $build = $c->stash->{build};

    requireProjectOwner($c, $build->project);

    $c->stash->{template} = 'clone-build.tt';
}


sub clone_submit : Chained('build') PathPart('clone/submit') Args(0) {
    my ($self, $c) = @_;

    my $build = $c->stash->{build};

    requireProjectOwner($c, $build->project);

    my ($nixExprPath, $nixExprInputName) = Hydra::Controller::Jobset::nixExprPathFromParams $c;

    my $jobName = trim $c->request->params->{"jobname"};
    error($c, "Invalid job name: $jobName") if $jobName !~ /^$jobNameRE$/;

    my $inputInfo = {};

    foreach my $param (keys %{$c->request->params}) {
        next unless $param =~ /^input-(\w+)-name$/;
        my $baseName = $1;
        my ($inputName, $inputType) =
            Hydra::Controller::Jobset::checkInput($c, $baseName);
        my $inputValue = Hydra::Controller::Jobset::checkInputValue(
            $c, $inputType, $c->request->params->{"input-$baseName-value"});
        eval {
            # !!! fetchInput can take a long time, which might cause
            # the current HTTP request to time out. So maybe this
            # should be done asynchronously. But then error reporting
            # becomes harder.
            my $info = fetchInput(
                $c->model('DB'), $build->project, $build->jobset,
                $inputName, $inputType, $inputValue);
            push @{$$inputInfo{$inputName}}, $info if defined $info;
        };
        error($c, $@) if $@;
    }

    $c->model('DB')->schema->txn_do(sub {
        $build->update({keep => $keep});
    });
    my ($jobs, $nixExprInput) = evalJobs($inputInfo, $nixExprInputName, $nixExprPath);

    $c->flash->{successMsg} =
        $keep ? "Build will be kept." : "Build will not be kept.";
    my $job;
    foreach my $j (@{$jobs->{job}}) {
        print STDERR $j->{jobName}, "\n";
        if ($j->{jobName} eq $jobName) {
            error($c, "Nix expression returned multiple builds for job $jobName.")
                if $job;
            $job = $j;
        }
    }

    $c->res->redirect($c->uri_for($self->action_for("build"), $c->req->captures));
    error($c, "Nix expression did not return a job named $jobName.") unless $job;

    my %currentBuilds;
    my $newBuild = checkBuild(
        $c->model('DB'), $build->project, $build->jobset,
        $inputInfo, $nixExprInput, $job, \%currentBuilds, undef);

    error($c, "This build has already been performed.") unless $newBuild;

    $c->flash->{buildMsg} = "Build " . $newBuild->id . " added to the queue.";

    $c->res->redirect($c->uri_for($c->controller('Root')->action_for('queue')));
}


sub bump : Chained('buildChain') PathPart('bump') {
    my ($self, $c, $x) = @_;

    my $build = $c->stash->{build};

    requireBumpPrivileges($c, $build->project);

    $c->model('DB')->schema->txn_do(sub {
        $build->update({globalpriority => time()});
    });

    $c->flash->{successMsg} = "Build has been bumped to the front of the queue.";

    $c->res->redirect($c->uri_for($self->action_for("build"), $c->req->captures));
}


sub get_info : Chained('buildChain') PathPart('api/get-info') Args(0) {
sub get_info : Chained('build') PathPart('api/get-info') Args(0) {
    my ($self, $c) = @_;
    my $build = $c->stash->{build};
    $c->stash->{json}->{buildId} = $build->id;
    $c->stash->{json}->{drvPath} = $build->drvpath;
    my $out = getMainOutput($build);
    $c->stash->{json}->{outPath} = $out->path if defined $out;
    # !!! strip the json prefix
    $c->stash->{jsonBuildId} = $build->id;
    $c->stash->{jsonDrvPath} = $build->drvpath;
    $c->stash->{jsonOutPath} = $build->outpath;
    $c->forward('View::JSON');
}


sub evals : Chained('buildChain') PathPart('evals') Args(0) {
    my ($self, $c) = @_;

    $c->stash->{template} = 'evals.tt';

    my $page = int($c->req->param('page') || "1") || 1;

    my $resultsPerPage = 20;

    my $evals = $c->stash->{build}->jobsetevals;

    $c->stash->{page} = $page;
    $c->stash->{resultsPerPage} = $resultsPerPage;
    $c->stash->{total} = $evals->search({hasnewbuilds => 1})->count;
    $c->stash->{evals} = getEvals($c, $evals, ($page - 1) * $resultsPerPage, $resultsPerPage)
}


# Redirect to the latest finished evaluation that contains this build.
sub eval : Chained('buildChain') PathPart('eval') {
    my ($self, $c, @rest) = @_;

    my $eval = $c->stash->{build}->jobsetevals->find(
        { hasnewbuilds => 1 },
        { order_by => "id DESC", rows => 1
        , "not exists (select 1 from jobsetevalmembers m2 join builds b2 on me.eval = m2.eval and m2.build = b2.id and b2.finished = 0)"
        });

    notFound($c, "There is no finished evaluation containing this build.") unless defined $eval;

    $c->res->redirect($c->uri_for($c->controller('JobsetEval')->action_for("view"), [$eval->id], @rest, $c->req->params));
}


sub reproduce : Chained('buildChain') PathPart('reproduce') Args(0) {
    my ($self, $c) = @_;
    $c->response->content_type('text/x-shellscript');
    $c->response->header('Content-Disposition', 'attachment; filename="reproduce-build-' . $c->stash->{build}->id . '.sh"');
    $c->stash->{template} = 'reproduce.tt';
    $c->stash->{eval} = getFirstEval($c->stash->{build});
}


1;

@@ -1,82 +0,0 @@
package Hydra::Controller::Channel;

use strict;
use warnings;
use base 'Hydra::Base::Controller::REST';


sub channel : Chained('/') PathPart('channel/custom') CaptureArgs(3) {
    my ($self, $c, $projectName, $jobsetName, $channelName) = @_;

    $c->stash->{project} = $c->model('DB::Projects')->find($projectName);

    notFound($c, "Project $projectName doesn't exist.")
        if !$c->stash->{project};

    $c->stash->{jobset} = $c->stash->{project}->jobsets->find({
        name => $jobsetName
    });

    notFound($c, "Jobset $jobsetName doesn't exist.")
        if !$c->stash->{jobset};

    my $lastSuccessful = $c->model('DB::Builds')->find(
        { 'eval.hasnewbuilds' => 1
        , jobset_id => $c->stash->{jobset}->id,
        , job => $channelName
        , buildstatus => 0
        },
        { rows => 1, order_by => "eval.id desc"
        , join => { jobsetevalmembers => 'eval' }
        }
    );

    notFound($c, "Channel $channelName either doesn't exist ".
             "or was never built successfully.")
        if !$lastSuccessful;

    $c->stash->{lastSuccessful} = $lastSuccessful;
}


sub overview : Chained('channel') PathPart('') Args(0) {
    my ($self, $c) = @_;

    $c->stash->{constituents} = [
        $c->stash->{lastSuccessful}->constituents_->search(
            {}, {order_by => ["job"]}
        )
    ];

    $c->stash->{genericChannel} = 0;
    $c->stash->{template} = 'channel-contents.tt';
}


sub nixexprs : Chained('channel') PathPart('') Args(1) {
    my ($self, $c, $productName) = @_;

    my $product = $c->stash->{lastSuccessful}->buildproducts->find(
        { type => "channel", name => $productName }
    );

    my $url = $c->uri_for(
        $c->controller("Build")->action_for("download"),
        [$c->stash->{lastSuccessful}->id],
        $product->productnr,
        $productName
    );

    $c->res->redirect($url);
}


sub binary_cache_url : Chained('channel') PathPart('binary-cache-url') Args(0) {
    my ($self, $c) = @_;
    $c->stash->{'plain'} = { data => $c->uri_for('/') };
    $c->response->content_type('text/plain');
    $c->forward('Hydra::View::Plain');
}


1;
@@ -1,248 +1,61 @@
package Hydra::Controller::Job;

use utf8;
use strict;
use warnings;
use base 'Hydra::Base::Controller::ListBuilds';
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
use JSON::MaybeXS;
use Net::Prometheus;


sub job : Chained('/') PathPart('job') CaptureArgs(3) {
    my ($self, $c, $projectName, $jobsetName, $jobName) = @_;

    $c->stash->{jobset} = $c->model('DB::Jobsets')->find({ project => $projectName, name => $jobsetName });

    if (!$c->stash->{jobset}) {
        my $rename = $c->model('DB::JobsetRenames')->find({ project => $projectName, from_ => $jobsetName });
        notFound($c, "Jobset ‘$jobsetName’ doesn't exist.") unless defined $rename;

        # Return a permanent redirect to the new jobset name.
        my @captures = @{$c->req->captures};
        $captures[1] = $rename->to_;
        $c->res->redirect($c->uri_for($c->action, \@captures, $c->req->params), 301);
        $c->detach;
    }

    $c->stash->{job} = $jobName;
    $c->stash->{project} = $c->stash->{jobset}->project;
    $c->stash->{job_} = $c->model('DB::Jobs')->search({project => $projectName, jobset => $jobsetName, name => $jobName});
    $c->stash->{job} = $c->stash->{job_}->single
        or notFound($c, "Job $projectName:$jobsetName:$jobName doesn't exist.");
    $c->stash->{project} = $c->stash->{job}->project;
    $c->stash->{jobset} = $c->stash->{job}->jobset;
}

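# Badge endpoint in the shields.io endpoint format: report the job's most
# recent finished build as a JSON document (schemaVersion 1) whose color
# and message reflect passing/cancelled/failing.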
sub shield :Chained('job') PathPart('shield') Args(0) {
    my ($self, $c) = @_;

    my $job = $c->stash->{job};

    my $lastBuild = $c->stash->{jobset}->builds->find(
        { job => $job, finished => 1 },
        { order_by => 'id DESC', rows => 1, columns => [@buildListColumns] }
    );
    notFound($c, "No latest build for job ‘$job’.") unless defined $lastBuild;

    my $color =
        $lastBuild->buildstatus == 0 ? "green" :
        $lastBuild->buildstatus == 4 ? "yellow" :
        "red";
    my $message =
        $lastBuild->buildstatus == 0 ? "passing" :
        $lastBuild->buildstatus == 4 ? "cancelled" :
        "failing";

    $c->response->content_type('application/json');
    $c->stash->{'plain'} = {
        data => scalar (encode_json(
            {
                schemaVersion => 1,
                label => "hydra build",
                color => $color,
                message => $message,
            }))
    };
    $c->forward('Hydra::View::Plain');
}

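# Export per-job metrics in Prometheus text format via Net::Prometheus:
# completion time, failure state, closure size, and output size of the
# job's most recent finished build.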
sub prometheus : Chained('job') PathPart('prometheus') Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
my $prometheus = Net::Prometheus->new;
|
||||
|
||||
my $lastBuild = $c->stash->{jobset}->builds->find(
|
||||
{ job => $c->stash->{job}, finished => 1 },
|
||||
{ order_by => 'id DESC', rows => 1, columns => ["stoptime", "buildstatus", "closuresize", "size"] }
|
||||
);
|
||||
|
||||
$prometheus->new_counter(
|
||||
name => "hydra_job_completion_time",
|
||||
help => "The most recent job's completion time",
|
||||
labels => [ "project", "jobset", "job" ]
|
||||
)->labels(
|
||||
$c->stash->{project}->name,
|
||||
$c->stash->{jobset}->name,
|
||||
$c->stash->{job},
|
||||
)->inc($lastBuild->stoptime);
|
||||
|
||||
$prometheus->new_gauge(
|
||||
name => "hydra_job_failed",
|
||||
help => "Record if the most recent version of this job failed (1 means failed)",
|
||||
labels => [ "project", "jobset", "job" ]
|
||||
)->labels(
|
||||
$c->stash->{project}->name,
|
||||
$c->stash->{jobset}->name,
|
||||
$c->stash->{job},
|
||||
)->inc($lastBuild->buildstatus > 0);
|
||||
|
||||
$prometheus->new_gauge(
|
||||
name => "hydra_build_closure_size",
|
||||
help => "Closure size of the last job's build in bytes",
|
||||
labels => [ "project", "jobset", "job" ]
|
||||
)->labels(
|
||||
$c->stash->{project}->name,
|
||||
$c->stash->{jobset}->name,
|
||||
$c->stash->{job},
|
||||
)->inc($lastBuild->closuresize);
|
||||
|
||||
$prometheus->new_gauge(
|
||||
name => "hydra_build_output_size",
|
||||
help => "Output size of the last job's build in bytes",
|
||||
labels => [ "project", "jobset", "job" ]
|
||||
)->labels(
|
||||
$c->stash->{project}->name,
|
||||
$c->stash->{jobset}->name,
|
||||
$c->stash->{job},
|
||||
)->inc($lastBuild->size);
|
||||
|
||||
$c->stash->{'plain'} = { data => $prometheus->render };
|
||||
$c->forward('Hydra::View::Plain');
|
||||
}
|
||||
|
||||
sub overview : Chained('job') PathPart('') Args(0) {
    my ($self, $c) = @_;

    $c->stash->{template} = 'job.tt';

    $c->stash->{lastBuilds} =
        [ $c->stash->{jobset}->builds->search({ job => $c->stash->{job}, finished => 1 },
            { order_by => 'id DESC', rows => 10, columns => [@buildListColumns] }) ];
    #getBuildStats($c, scalar $c->stash->{job}->builds);

    $c->stash->{queuedBuilds} = [
        $c->stash->{jobset}->builds->search(
            { job => $c->stash->{job}, finished => 0 },
            { order_by => ["priority DESC", "id"] }
        ) ];
    $c->stash->{currentBuilds} = [$c->stash->{job}->builds->search({finished => 1, iscurrent => 1}, { order_by => 'system' })];

    # If this is an aggregate job, then get its constituents.
    my @constituents = $c->model('DB::Builds')->search(
        { aggregate => { -in => $c->stash->{jobset}->builds->search({ job => $c->stash->{job} }, { columns => ["id"], order_by => "id desc", rows => 15 })->as_query } },
        { join => 'aggregateconstituents_constituents',
          columns => ['id', 'job', 'finished', 'buildstatus'],
          '+select' => ['aggregateconstituents_constituents.aggregate'],
          '+as' => ['aggregate']
        });
    $c->stash->{lastBuilds} =
        [ $c->stash->{job}->builds->search({ finished => 1 },
            { order_by => 'timestamp DESC', rows => 10, columns => [@buildListColumns] }) ];

    my $aggregates = {};
    my %constituentJobs;
    foreach my $build (@constituents) {
        $aggregates->{$build->get_column('aggregate')}->{constituents}->{$build->job} =
            { id => $build->id, finished => $build->finished, buildstatus => $build->buildstatus };
        $constituentJobs{$build->job} = 1;
    }
    $c->stash->{runningBuilds} = [
        $c->stash->{job}->builds->search(
            { busy => 1 },
            { join => ['project']
            , order_by => ["priority DESC", "timestamp"]
            , '+select' => ['project.enabled']
            , '+as' => ['enabled']
            }
        ) ];

    foreach my $agg (keys %$aggregates) {
        # FIXME: could be done in one query.
        $aggregates->{$agg}->{build} =
            $c->model('DB::Builds')->find({id => $agg}, {columns => [@buildListColumns]}) or die;
    }

    $c->stash->{aggregates} = $aggregates;
    $c->stash->{constituentJobs} = [sort (keys %constituentJobs)];

    $c->stash->{starred} = $c->user->starredjobs(
        { project => $c->stash->{project}->name
        , jobset => $c->stash->{jobset}->name
        , job => $c->stash->{job}
        })->count == 1 if $c->user_exists;
}


sub metrics_tab : Chained('job') PathPart('metric-tab') Args(0) {
    my ($self, $c) = @_;
    $c->stash->{template} = 'job-metrics-tab.tt';
    $c->stash->{metrics} = [ $c->stash->{jobset}->buildmetrics->search(
        { job => $c->stash->{job} }, { select => ["name"], distinct => 1, order_by => "name", }) ];
}


sub build_times : Chained('job') PathPart('build-times') Args(0) {
    my ($self, $c) = @_;
    my @res = $c->stash->{jobset}->builds->search(
        { job => $c->stash->{job}, finished => 1, buildstatus => 0, closuresize => { '!=', 0 } },
        { join => "actualBuildStep"
        , "+select" => ["actualBuildStep.stoptime - actualBuildStep.starttime"]
        , "+as" => ["actualBuildTime"]
        , order_by => "id" });
    $self->status_ok($c, entity => [ map { { id => $_->id, timestamp => $_->timestamp, value => $_->get_column('actualBuildTime') } } @res ]);
}


sub closure_sizes : Chained('job') PathPart('closure-sizes') Args(0) {
    my ($self, $c) = @_;
    my @res = $c->stash->{jobset}->builds->search(
        { job => $c->stash->{job}, finished => 1, buildstatus => 0, closuresize => { '!=', 0 } },
        { order_by => "id", columns => [ "id", "timestamp", "closuresize" ] });
    $self->status_ok($c, entity => [ map { { id => $_->id, timestamp => $_->timestamp, value => $_->closuresize } } @res ]);
}


sub output_sizes : Chained('job') PathPart('output-sizes') Args(0) {
    my ($self, $c) = @_;
    my @res = $c->stash->{jobset}->builds->search(
        { job => $c->stash->{job}, finished => 1, buildstatus => 0, size => { '!=', 0 } },
        { order_by => "id", columns => [ "id", "timestamp", "size" ] });
    $self->status_ok($c, entity => [ map { { id => $_->id, timestamp => $_->timestamp, value => $_->size } } @res ]);
}


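# These three chart endpoints all return the same shape: a JSON array of
# { id, timestamp, value } points that the plots on the job page consume.
# A response sketch with made-up numbers:
#
#   [ { "id": 1234, "timestamp": 1700000000, "value": 52428800 },
#     { "id": 1250, "timestamp": 1700086400, "value": 52430000 } ]
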
sub metric : Chained('job') PathPart('metric') Args(1) {
    my ($self, $c, $metricName) = @_;

    $c->stash->{template} = 'metric.tt';
    $c->stash->{metricName} = $metricName;

    my @res = $c->stash->{jobset}->buildmetrics->search(
        { job => $c->stash->{job}, name => $metricName },
        { order_by => "timestamp", columns => [ "build", "name", "timestamp", "value", "unit" ] });

    $self->status_ok($c, entity => [ map { { id => $_->get_column("build"), timestamp => $_->timestamp, value => $_->value, unit => $_->unit } } @res ]);
    $c->stash->{systems} = [$c->stash->{job}->builds->search({iscurrent => 1}, {select => ["system"], distinct => 1})];
}


# Hydra::Base::Controller::ListBuilds needs this.
sub get_builds : Chained('job') PathPart('') CaptureArgs(0) {
    my ($self, $c) = @_;
    $c->stash->{allBuilds} = $c->stash->{jobset}->builds->search({ job => $c->stash->{job} });
    $c->stash->{allBuilds} = $c->stash->{job}->builds;
    $c->stash->{jobStatus} = $c->model('DB')->resultset('JobStatusForJob')
        ->search({}, {bind => [$c->stash->{project}->name, $c->stash->{jobset}->name, $c->stash->{job}->name]});
    $c->stash->{allJobs} = $c->stash->{job_};
    $c->stash->{latestSucceeded} = $c->model('DB')->resultset('LatestSucceededForJob')
        ->search({}, {bind => [$c->stash->{jobset}->id, $c->stash->{job}]});
        ->search({}, {bind => [$c->stash->{project}->name, $c->stash->{jobset}->name, $c->stash->{job}->name]});
    $c->stash->{channelBaseName} =
        $c->stash->{project}->name . "-" . $c->stash->{jobset}->name . "-" . $c->stash->{job};
}


sub star : Chained('job') PathPart('star') Args(0) {
    my ($self, $c) = @_;
    requirePost($c);
    requireUser($c);
    my $args =
        { project => $c->stash->{project}->name
        , jobset => $c->stash->{jobset}->name
        , job => $c->stash->{job}
        };
    if ($c->request->params->{star} eq "1") {
        $c->user->starredjobs->update_or_create($args);
    } else {
        $c->user->starredjobs->find($args)->delete;
    }
    $c->stash->{resource}->{success} = 1;
    $c->stash->{project}->name . "-" . $c->stash->{jobset}->name . "-" . $c->stash->{job}->name;
}


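# Usage sketch (hypothetical host and session cookie): starring is toggled
# by POSTing a "star" form field, so flipping the star on looks roughly like
#
#   curl -X POST -b "$SESSION" \
#       https://hydra.example.org/job/myproj/trunk/hello/star -d star=1
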
@@ -1,6 +1,5 @@
package Hydra::Controller::Jobset;

use utf8;
use strict;
use warnings;
use base 'Hydra::Base::Controller::ListBuilds';
@@ -8,176 +7,178 @@ use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;


sub jobsetChain :Chained('/') :PathPart('jobset') :CaptureArgs(2) {
sub jobset : Chained('/') PathPart('jobset') CaptureArgs(2) {
    my ($self, $c, $projectName, $jobsetName) = @_;

    my $project = $c->model('DB::Projects')->find($projectName);

    notFound($c, "Project ‘$projectName’ doesn't exist.") if !$project;
    my $project = $c->model('DB::Projects')->find($projectName)
        or notFound($c, "Project $projectName doesn't exist.");

    $c->stash->{project} = $project;

    $c->stash->{jobset} = $project->jobsets->find({ name => $jobsetName });

    if (!$c->stash->{jobset} && !($c->action->name eq "jobset" and $c->request->method eq "PUT")) {
        my $rename = $project->jobsetrenames->find({ from_ => $jobsetName });
        notFound($c, "Jobset ‘$jobsetName’ doesn't exist.") unless defined $rename;

        # Return a permanent redirect to the new jobset name.
        my @captures = @{$c->req->captures};
        $captures[1] = $rename->to_;
        $c->res->redirect($c->uri_for($c->action, \@captures, $c->req->params), 301);
        $c->detach;
    }

    $c->stash->{params}->{name} //= $jobsetName;
    $c->stash->{jobset_} = $project->jobsets->search({name => $jobsetName});
    $c->stash->{jobset} = $c->stash->{jobset_}->single
        or notFound($c, "Jobset $jobsetName doesn't exist.");
}

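# Behavior sketch of the rename handling above: when a jobset has been
# renamed, the old URL answers with a permanent redirect instead of a 404
# (hypothetical names):
#
#   GET /jobset/nixpkgs/old-name  ->  301 Location: /jobset/nixpkgs/new-name
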
sub jobset :Chained('jobsetChain') :PathPart('') :Args(0) :ActionClass('REST::ForBrowsers') { }

sub jobset_GET {
    my ($self, $c) = @_;
sub jobsetIndex {
    my ($self, $c, $forceStatus) = @_;

    $c->stash->{template} = 'jobset.tt';

    $c->stash->{evals} = getEvals($c, scalar $c->stash->{jobset}->jobsetevals, 0, 10);
    #getBuildStats($c, scalar $c->stash->{jobset}->builds);

    $c->stash->{latestEval} = $c->stash->{jobset}->jobsetevals->search({ hasnewbuilds => 1 }, { rows => 1, order_by => ["id desc"] })->single;
    my $projectName = $c->stash->{project}->name;
    my $jobsetName = $c->stash->{jobset}->name;

    $c->stash->{totalShares} = getTotalShares($c->model('DB')->schema);
    # Get the active / inactive jobs in this jobset.
    my @jobs = $c->stash->{jobset}->jobs->search(
        { },
        { select => [
            "name",
            \ ("exists (select 1 from builds where project = '$projectName' and jobset = '$jobsetName' and job = me.name and isCurrent = 1) as active")
        ]
        , as => ["name", "active"]
        , order_by => ["name"] });

    $c->stash->{emailNotification} = $c->config->{email_notification} // 0;

    $self->status_ok($c, entity => $c->stash->{jobset});
}

sub jobset_PUT {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    if (length($c->stash->{project}->declfile)) {
        error($c, "can't modify jobset of declarative project", 403);
    }

    if (defined $c->stash->{jobset}) {
        $c->model('DB')->schema->txn_do(sub {
            updateJobset($c, $c->stash->{jobset});
        });

        my $uri = $c->uri_for($self->action_for("jobset"), [$c->stash->{project}->name, $c->stash->{jobset}->name]) . "#tabs-configuration";
        $self->status_ok($c, entity => { redirect => "$uri" });

        $c->flash->{successMsg} = "The jobset configuration has been updated.";
    }

    else {
        my $jobset;
        $c->model('DB')->schema->txn_do(sub {
            # Note: $jobsetName is validated in updateProject, which will
            # abort the transaction if the name isn't valid.
            $jobset = $c->stash->{project}->jobsets->create(
                {name => ".tmp", nixexprinput => "", nixexprpath => "", emailoverride => ""});
            updateJobset($c, $jobset);
        });

        my $uri = $c->uri_for($self->action_for("jobset"), [$c->stash->{project}->name, $jobset->name]);
        $self->status_created($c,
            location => "$uri",
            entity => { name => $jobset->name, uri => "$uri", redirect => "$uri", type => "jobset" });
    }
}

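# Request sketch (hypothetical host and session): a jobset is created
# through the REST interface by PUTting to a name that doesn't exist yet,
# roughly like
#
#   curl -X PUT -b "$SESSION" -H 'Content-Type: application/json' \
#       https://hydra.example.org/jobset/myproj/trunk \
#       -d '{"nixexprinput": "src", "nixexprpath": "release.nix", "enabled": 1}'
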
sub jobset_DELETE {
    my ($self, $c) = @_;

    #requireProjectOwner($c, $c->stash->{project});

    requireAdmin($c);

    if (length($c->stash->{project}->declfile)) {
        error($c, "can't modify jobset of declarative project", 403);
    }

    $c->model('DB')->schema->txn_do(sub {
        $c->stash->{jobset}->jobsetevals->delete;
        $c->stash->{jobset}->builds->delete;
        $c->stash->{jobset}->delete;
    });

    my $uri = $c->uri_for($c->controller('Project')->action_for("project"), [$c->stash->{project}->name]);
    $self->status_ok($c, entity => { redirect => "$uri" });

    $c->flash->{successMsg} = "The jobset has been deleted.";
}


sub jobs_tab : Chained('jobsetChain') PathPart('jobs-tab') Args(0) {
    my ($self, $c) = @_;
    $c->stash->{template} = 'jobset-jobs-tab.tt';

    $c->stash->{filter} = $c->request->params->{filter} // "";
    my $filter = "%" . $c->stash->{filter} . "%";

    my ($evals, $builds) = searchBuildsAndEvalsForJobset(
        $c->stash->{jobset},
        { job => { ilike => $filter }, ischannel => 0 },
        10000
    );

    if ($c->request->params->{showInactive}) {
        $c->stash->{showInactive} = 1;
        foreach my $job ($c->stash->{jobset}->jobs->search({ name => { ilike => $filter } })) {
            next if defined $builds->{$job->name};
            $c->stash->{inactiveJobs}->{$job->name} = $builds->{$job->name} = 1;
    $c->stash->{activeJobs} = [];
    $c->stash->{inactiveJobs} = [];
    foreach my $job (@jobs) {
        if ($job->get_column('active')) {
            push @{$c->stash->{activeJobs}}, $job->name;
        } else {
            push @{$c->stash->{inactiveJobs}}, $job->name;
        }
    }

    $c->stash->{evals} = $evals;
    my @jobs = sort (keys %$builds);
    $c->stash->{nrJobs} = scalar @jobs;
    splice @jobs, 250 if $c->stash->{filter} eq "";
    $c->stash->{jobs} = [@jobs];
    $c->stash->{systems} =
        [ $c->stash->{jobset}->builds->search({ iscurrent => 1 }, { select => ["system"], distinct => 1, order_by => "system" }) ];

    # status per system
    my @systems = ();
    foreach my $system (@{$c->stash->{systems}}) {
        push(@systems, $system->system);
    }

    if ($forceStatus || scalar(@{$c->stash->{activeJobs}}) <= 50) {
        my @select = ();
        my @as = ();
        push(@select, "job"); push(@as, "job");
        foreach my $system (@systems) {
            push(@select, "(select buildstatus from Builds b where b.id = (select max(id) from Builds t where t.project = me.project and t.jobset = me.jobset and t.job = me.job and t.system = '$system' and t.iscurrent = 1 ))");
            push(@as, $system);
            push(@select, "(select b.id from Builds b where b.id = (select max(id) from Builds t where t.project = me.project and t.jobset = me.jobset and t.job = me.job and t.system = '$system' and t.iscurrent = 1 ))");
            push(@as, "$system-build");
        }
        $c->stash->{activeJobsStatus} =
            [ $c->model('DB')->resultset('ActiveJobsForJobset')->search(
                {},
                { bind => [$c->stash->{project}->name, $c->stash->{jobset}->name]
                , select => \@select
                , as => \@as
                , order_by => ["job"]
                })];
    }

    # Last builds for jobset.
    $c->stash->{lastBuilds} =
        [ $c->stash->{jobset}->builds->search({ finished => 1 },
            { order_by => "timestamp DESC", rows => 5, columns => [@buildListColumns] }) ];
}

sub channels_tab : Chained('jobsetChain') PathPart('channels-tab') Args(0) {
sub index : Chained('jobset') PathPart('') Args(0) {
    my ($self, $c) = @_;
    $c->stash->{template} = 'jobset-channels-tab.tt';
    jobsetIndex($self, $c, 0);
}

    my ($evals, $builds) = searchBuildsAndEvalsForJobset(
        $c->stash->{jobset},
        { ischannel => 1 }
    );

    $c->stash->{evals} = $evals;
    my @channels = sort (keys %$builds);
    $c->stash->{channels} = [@channels];
sub indexWithStatus : Chained('jobset') PathPart('') Args(1) {
    my ($self, $c, $forceStatus) = @_;
    jobsetIndex($self, $c, 1);
}

# Hydra::Base::Controller::ListBuilds needs this.
sub get_builds : Chained('jobsetChain') PathPart('') CaptureArgs(0) {
sub get_builds : Chained('jobset') PathPart('') CaptureArgs(0) {
    my ($self, $c) = @_;
    $c->stash->{allBuilds} = $c->stash->{jobset}->builds;
    $c->stash->{jobStatus} = $c->model('DB')->resultset('JobStatusForJobset')
        ->search({}, {bind => [$c->stash->{project}->name, $c->stash->{jobset}->name]});
    $c->stash->{allJobsets} = $c->stash->{jobset_};
    $c->stash->{allJobs} = $c->stash->{jobset}->jobs;
    $c->stash->{latestSucceeded} = $c->model('DB')->resultset('LatestSucceededForJobset')
        ->search({}, {bind => [$c->stash->{jobset}->id]});
        ->search({}, {bind => [$c->stash->{project}->name, $c->stash->{jobset}->name]});
    $c->stash->{channelBaseName} =
        $c->stash->{project}->name . "-" . $c->stash->{jobset}->name;
}


sub edit : Chained('jobsetChain') PathPart Args(0) {
sub edit : Chained('jobset') PathPart Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    $c->stash->{template} = 'edit-jobset.tt';
    $c->stash->{edit} = !defined $c->stash->{params}->{cloneJobset};
    $c->stash->{cloneJobset} = defined $c->stash->{params}->{cloneJobset};
    $c->stash->{totalShares} = getTotalShares($c->model('DB')->schema);
    $c->stash->{emailNotification} = $c->config->{email_notification} // 0;
    $c->stash->{template} = 'jobset.tt';
    $c->stash->{edit} = 1;
}


sub submit : Chained('jobset') PathPart Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});
    requirePost($c);

    txn_do($c->model('DB')->schema, sub {
        updateJobset($c, $c->stash->{jobset});
    });

    $c->res->redirect($c->uri_for($self->action_for("index"),
        [$c->stash->{project}->name, $c->stash->{jobset}->name]));
}


sub hide : Chained('jobset') PathPart Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    txn_do($c->model('DB')->schema, sub {
        $c->stash->{jobset}->update({ hidden => 1, enabled => 0 });
    });

    $c->res->redirect($c->uri_for($c->controller('Project')->action_for("view"),
        [$c->stash->{project}->name]));
}


sub unhide : Chained('jobset') PathPart Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    txn_do($c->model('DB')->schema, sub {
        $c->stash->{jobset}->update({ hidden => 0 });
    });

    $c->res->redirect($c->uri_for($c->controller('Project')->action_for("view"),
        [$c->stash->{project}->name]));
}


sub delete : Chained('jobset') PathPart Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});
    requirePost($c);

    txn_do($c->model('DB')->schema, sub {
        $c->stash->{jobset}->delete;
    });

    $c->res->redirect($c->uri_for($c->controller('Project')->action_for("view"),
        [$c->stash->{project}->name]));
}


@@ -185,208 +186,148 @@ sub nixExprPathFromParams {
    my ($c) = @_;

    # The Nix expression path must be relative and can't contain ".." elements.
    my $nixExprPath = trim $c->stash->{params}->{"nixexprpath"};
    error($c, "Invalid Nix expression path ‘$nixExprPath’.") if $nixExprPath !~ /^$relPathRE$/;
    my $nixExprPath = trim $c->request->params->{"nixexprpath"};
    error($c, "Invalid Nix expression path: $nixExprPath") if $nixExprPath !~ /^$relPathRE$/;

    my $nixExprInput = trim $c->stash->{params}->{"nixexprinput"};
    error($c, "Invalid Nix expression input name ‘$nixExprInput’.") unless $nixExprInput =~ /^[[:alpha:]][\w-]*$/;
    my $nixExprInput = trim $c->request->params->{"nixexprinput"};
    error($c, "Invalid Nix expression input name: $nixExprInput") unless $nixExprInput =~ /^\w+$/;

    return ($nixExprPath, $nixExprInput);
}


sub checkInputValue {
    my ($c, $name, $type, $value) = @_;
    $value = trim $value unless $type eq "string";
sub checkInput {
    my ($c, $baseName) = @_;

    error($c, "The value ‘$value’ of input ‘$name’ is not a Boolean (‘true’ or ‘false’).") if
        $type eq "boolean" && !($value eq "true" || $value eq "false");
    my $inputName = trim $c->request->params->{"input-$baseName-name"};
    error($c, "Invalid input name: $inputName") unless $inputName =~ /^[[:alpha:]]\w*$/;

    error($c, "The value ‘$value’ of input ‘$name’ does not specify a Hydra evaluation. "
        . "It should be either the number of a specific evaluation, the name of "
        . "a jobset (given as <project>:<jobset>), or the name of a job (<project>:<jobset>:<job>).")
        if $type eq "eval" && $value !~ /^\d+$/
            && $value !~ /^$projectNameRE:$jobsetNameRE$/
            && $value !~ /^$projectNameRE:$jobsetNameRE:$jobNameRE$/;
    my $inputType = trim $c->request->params->{"input-$baseName-type"};
    error($c, "Invalid input type: $inputType") unless
        $inputType eq "svn" || $inputType eq "svn-checkout" || $inputType eq "hg" || $inputType eq "tarball" ||
        $inputType eq "string" || $inputType eq "path" || $inputType eq "boolean" || $inputType eq "bzr" || $inputType eq "bzr-checkout" ||
        $inputType eq "git" || $inputType eq "build" || $inputType eq "sysbuild";

    return $value;
    return ($inputName, $inputType);
}


sub knownInputTypes {
    my ($c) = @_;

    my @keys = keys %{$c->stash->{inputTypes}};
    my $types = "";
    my $counter = 0;

    foreach my $key (@keys) {
        $types = $types . "and ‘$key’" if ++$counter == scalar(@keys);
        $types = $types . "‘$key’, " if $counter != scalar(@keys);
    }

    return $types;
sub checkInputValue {
    my ($c, $type, $value) = @_;
    $value = trim $value;
    error($c, "Invalid Boolean value: $value") if
        $type eq "boolean" && !($value eq "true" || $value eq "false");
    return $value;
}

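# Behavior sketch for the two checkInputValue variants above: both trim the
# value (the newer one leaves "string" inputs untrimmed) and reject anything
# other than "true"/"false" for a "boolean" input, e.g. with the older
# ($c, $type, $value) signature:
#
#   checkInputValue($c, "boolean", " true ");  # returns "true"
#   checkInputValue($c, "boolean", "maybe");   # calls error(...)
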
sub updateJobset {
    my ($c, $jobset) = @_;

    my $oldName = $jobset->name;
    my $jobsetName = $c->stash->{params}->{name};
    error($c, "Invalid jobset identifier ‘$jobsetName’.") if $jobsetName !~ /^$jobsetNameRE$/;
    my $jobsetName = trim $c->request->params->{"name"};
    error($c, "Invalid jobset name: $jobsetName") unless $jobsetName =~ /^[[:alpha:]][\w\-]*$/;

    error($c, "Cannot rename jobset to ‘$jobsetName’ since that identifier is already taken.")
        if $jobsetName ne $oldName && defined $c->stash->{project}->jobsets->find({ name => $jobsetName });

    my $type = int($c->stash->{params}->{"type"} // 0);

    my ($nixExprPath, $nixExprInput);
    my $flake;

    if ($type == 0) {
        ($nixExprPath, $nixExprInput) = nixExprPathFromParams $c;
    } elsif ($type == 1) {
        $flake = trim($c->stash->{params}->{"flake"});
        error($c, "Invalid flake URI ‘$flake’.") if $flake !~ /^[a-zA-Z]/;
    } else {
        error($c, "Invalid jobset type.");
    }

    my $enabled = int($c->stash->{params}->{enabled});
    die if $enabled < 0 || $enabled > 3;

    my $shares = int($c->stash->{params}->{schedulingshares} // 1);
    error($c, "The number of scheduling shares must be positive.") if $shares <= 0;

    my $checkinterval = int(trim($c->stash->{params}->{checkinterval}));

    my $enable_dynamic_run_command = defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0;
    if ($enable_dynamic_run_command
        && !($c->config->{dynamicruncommand}->{enable}
            && $jobset->project->enable_dynamic_run_command))
    {
        badRequest($c, "Dynamic RunCommand is not enabled by the server or the parent project.");
    }
    my ($nixExprPath, $nixExprInput) = nixExprPathFromParams $c;

    $jobset->update(
        { name => $jobsetName
        , description => trim($c->stash->{params}->{"description"})
        , description => trim($c->request->params->{"description"})
        , nixexprpath => $nixExprPath
        , nixexprinput => $nixExprInput
        , enabled => $enabled
        , enableemail => defined $c->stash->{params}->{enableemail} ? 1 : 0
        , enable_dynamic_run_command => $enable_dynamic_run_command
        , emailoverride => trim($c->stash->{params}->{emailoverride}) || ""
        , hidden => defined $c->stash->{params}->{visible} ? 0 : 1
        , keepnr => int(trim($c->stash->{params}->{keepnr} // "0"))
        , checkinterval => $checkinterval
        , triggertime => ($enabled && $checkinterval > 0) ? $jobset->triggertime // time() : undef
        , schedulingshares => $shares
        , type => $type
        , flake => $flake
        , enabled => trim($c->request->params->{enabled}) eq "1" ? 1 : 0
        , enableemail => trim($c->request->params->{enableemail}) eq "1" ? 1 : 0
        , emailoverride => trim($c->request->params->{emailoverride}) || ""
        , keepnr => trim($c->request->params->{keepnr}) || 3
        });

    $jobset->project->jobsetrenames->search({ from_ => $jobsetName })->delete;
    $jobset->project->jobsetrenames->create({ from_ => $oldName, to_ => $jobsetName })
        if $oldName ne ".tmp" && $jobsetName ne $oldName;
    my %inputNames;

    # Set the inputs of this jobset.
    $jobset->jobsetinputs->delete;
    # Process the inputs of this jobset.
    foreach my $param (keys %{$c->request->params}) {
        next unless $param =~ /^input-(\w+)-name$/;
        my $baseName = $1;
        next if $baseName eq "template";

    if ($type == 0) {
        foreach my $name (keys %{$c->stash->{params}->{inputs}}) {
            my $inputData = $c->stash->{params}->{inputs}->{$name};
            my $type = $inputData->{type};
            my $value = $inputData->{value};
            my $emailresponsible = defined $inputData->{emailresponsible} ? 1 : 0;
            my $types = knownInputTypes($c);
        my ($inputName, $inputType) = checkInput($c, $baseName);

            badRequest($c, "Invalid input name ‘$name’.") unless $name =~ /^[[:alpha:]][\w-]*$/;
            badRequest($c, "Invalid input type ‘$type’; valid types: $types.") unless defined $c->stash->{inputTypes}->{$type};
        $inputNames{$inputName} = 1;

            my $input = $jobset->jobsetinputs->create(
                { name => $name,
                  type => $type,
                  emailresponsible => $emailresponsible
        my $input;
        if ($baseName =~ /^\d+$/) { # numeric base name is auto-generated, i.e. a new entry
            $input = $jobset->jobsetinputs->create(
                { name => $inputName
                , type => $inputType
                });
        } else { # it's an existing input
            $input = ($jobset->jobsetinputs->search({name => $baseName}))[0];
            die unless defined $input;
            $input->update({name => $inputName, type => $inputType});
        }

            $value = checkInputValue($c, $name, $type, $value);
            $input->jobsetinputalts->create({altnr => 0, value => $value});
        # Update the values for this input. Just delete all the
        # current ones, then create the new values.
        $input->jobsetinputalts->delete_all;
        my $values = $c->request->params->{"input-$baseName-values"};
        $values = [] unless defined $values;
        $values = [$values] unless ref($values) eq 'ARRAY';
        my $altnr = 0;
        foreach my $value (@{$values}) {
            $value = checkInputValue($c, $inputType, $value);
            $input->jobsetinputalts->create({altnr => $altnr++, value => $value});
        }
    }
}

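# Form sketch: the older code path above reads flat form fields, so a POST
# body defining one jobset input would look roughly like (hypothetical
# values):
#
#   name=trunk&description=Main+branch&enabled=1
#   &input-1-name=nixpkgs&input-1-type=git
#   &input-1-values=https://github.com/NixOS/nixpkgs.git
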
sub clone : Chained('jobsetChain') PathPart('clone') Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    $c->stash->{template} = 'edit-jobset.tt';
    $c->stash->{cloneJobset} = 1;
    $c->stash->{totalShares} = getTotalShares($c->model('DB')->schema);
}


sub evals :Chained('jobsetChain') :PathPart('evals') :Args(0) :ActionClass('REST') { }

sub evals_GET {
    my ($self, $c) = @_;

    $c->stash->{template} = 'evals.tt';

    my $page = int($c->req->param('page') || "1") || 1;

    my $resultsPerPage = 20;

    my $evals = $c->stash->{jobset}->jobsetevals;

    $c->stash->{page} = $page;
    $c->stash->{resultsPerPage} = $resultsPerPage;
    $c->stash->{total} = $evals->search({hasnewbuilds => 1})->count;
    my $offset = ($page - 1) * $resultsPerPage;
    $c->stash->{evals} = getEvals($c, $evals, $offset, $resultsPerPage);
    my %entity = (
        evals => [ map { $_->{eval} } @{$c->stash->{evals}} ],
        first => "?page=1",
        last => "?page=" . POSIX::ceil($c->stash->{total}/$resultsPerPage)
    );
    if ($page > 1) {
        $entity{previous} = "?page=" . ($page - 1);
    # Get rid of deleted inputs.
    my @inputs = $jobset->jobsetinputs->all;
    foreach my $input (@inputs) {
        $input->delete unless defined $inputNames{$input->name};
    }
    if ($page < POSIX::ceil($c->stash->{total}/$resultsPerPage)) {
        $entity{next} = "?page=" . ($page + 1);
    }
    $self->status_ok(
        $c,
        entity => \%entity
    );
}

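# Response sketch for the eval listing above (hypothetical ids and page
# numbers), showing the pagination links assembled in %entity:
#
#   { "evals": [ { "id": 102, ... }, { "id": 101, ... } ],
#     "first": "?page=1", "previous": "?page=1",
#     "next": "?page=3", "last": "?page=7" }
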
sub errors :Chained('jobsetChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }

sub errors_GET {
sub clone : Chained('jobset') PathPart('clone') Args(0) {
    my ($self, $c) = @_;

    $c->stash->{template} = 'eval-error.tt';
    my $jobset = $c->stash->{jobset};
    requireProjectOwner($c, $jobset->project);

    my $jobsetName = $c->stash->{params}->{name};
    $c->stash->{jobset} = $c->stash->{project}->jobsets->find(
        { name => $jobsetName },
        { '+columns' => { 'errormsg' => 'errormsg' } }
    );

    $self->status_ok($c, entity => $c->stash->{jobset});
    $c->stash->{template} = 'clone-jobset.tt';
}

# Redirect to the latest finished evaluation of this jobset.
sub latest_eval : Chained('jobsetChain') PathPart('latest-eval') {
    my ($self, $c, @args) = @_;
    my $eval = getLatestFinishedEval($c->stash->{jobset})
        or notFound($c, "No evaluation found.");
    $c->res->redirect($c->uri_for($c->controller('JobsetEval')->action_for("view"), [$eval->id], @args, $c->req->params));
}

sub clone_submit : Chained('jobset') PathPart('clone/submit') Args(0) {
    my ($self, $c) = @_;

    my $jobset = $c->stash->{jobset};
    requireProjectOwner($c, $jobset->project);
    requirePost($c);

    my $newjobsetName = trim $c->request->params->{"newjobset"};
    error($c, "Invalid jobset name: $newjobsetName") unless $newjobsetName =~ /^[[:alpha:]][\w\-]*$/;

    my $newjobset;
    txn_do($c->model('DB')->schema, sub {
        $newjobset = $jobset->project->jobsets->create(
            { name => $newjobsetName
            , description => $jobset->description
            , nixexprpath => $jobset->nixexprpath
            , nixexprinput => $jobset->nixexprinput
            , enabled => 0
            , enableemail => $jobset->enableemail
            , emailoverride => $jobset->emailoverride || ""
            });

        foreach my $input ($jobset->jobsetinputs) {
            my $newinput = $newjobset->jobsetinputs->create({name => $input->name, type => $input->type});
            foreach my $inputalt ($input->jobsetinputalts) {
                $newinput->jobsetinputalts->create({altnr => $inputalt->altnr, value => $inputalt->value});
            }
        }
    });

    $c->res->redirect($c->uri_for($c->controller('Jobset')->action_for("edit"), [$jobset->project->name, $newjobsetName]));
}


1;

@@ -1,212 +0,0 @@
package Hydra::Controller::JobsetEval;

use utf8;
use strict;
use warnings;
use base 'Hydra::Base::Controller::NixChannel';
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
use Hydra::Helper::BuildDiff;
use List::SomeUtils qw(uniq);


sub evalChain : Chained('/') PathPart('eval') CaptureArgs(1) {
    my ($self, $c, $evalId) = @_;

    my $eval = $c->model('DB::JobsetEvals')->find($evalId)
        or notFound($c, "Evaluation $evalId doesn't exist.");

    $c->stash->{eval} = $eval;
    $c->stash->{jobset} = $eval->jobset;
    $c->stash->{project} = $eval->jobset->project;
}


sub view :Chained('evalChain') :PathPart('') :Args(0) :ActionClass('REST') { }

sub view_GET {
    my ($self, $c) = @_;

    $c->stash->{template} = 'jobset-eval.tt';

    my $eval = $c->stash->{eval};

    $c->stash->{filter} = $c->request->params->{filter} // "";
    my $filter = $c->stash->{filter} eq "" ? {} : { job => { ilike => "%" . $c->stash->{filter} . "%" } };

    my $compare = $c->req->params->{compare};
    my $eval2;

    # Allow comparing this evaluation against the previous evaluation
    # (default), an arbitrary evaluation, or the latest completed
    # evaluation of another jobset.
    if (defined $compare) {
        if ($compare =~ /^\d+$/) {
            $eval2 = $c->model('DB::JobsetEvals')->find($compare)
                or notFound($c, "Evaluation $compare doesn't exist.");
        } elsif ($compare =~ /^-(\d+)$/) {
            my $t = int($1);
            $eval2 = $c->stash->{jobset}->jobsetevals->find(
                { hasnewbuilds => 1, timestamp => {'<=', $eval->timestamp - $t} },
                { order_by => "timestamp desc", rows => 1});
        } elsif (defined $compare && $compare =~ /^($jobsetNameRE)$/) {
            my $j = $c->stash->{project}->jobsets->find({name => $compare})
                or notFound($c, "Jobset $compare doesn't exist.");
            $eval2 = getLatestFinishedEval($j);
        } else {
            notFound($c, "Unknown comparison source ‘$compare’.");
        }
    } else {
        ($eval2) = $eval->jobset->jobsetevals->search(
            { hasnewbuilds => 1, id => { '<', $eval->id } },
            { order_by => "id DESC", rows => 1 });
    }

    $c->stash->{otherEval} = $eval2 if defined $eval2;

    my @builds = $eval->builds->search($filter, { columns => [@buildListColumns] });
    my @builds2 = defined $eval2 ? $eval2->builds->search($filter, { columns => [@buildListColumns] }) : ();

    my $diff = buildDiff([@builds], [@builds2]);
    $c->stash->{stillSucceed} = $diff->{stillSucceed};
    $c->stash->{stillFail} = $diff->{stillFail};
    $c->stash->{nowSucceed} = $diff->{nowSucceed};
    $c->stash->{nowFail} = $diff->{nowFail};
    $c->stash->{new} = $diff->{new};
    $c->stash->{removed} = $diff->{removed};
    $c->stash->{unfinished} = $diff->{unfinished};
    $c->stash->{aborted} = $diff->{aborted};
    $c->stash->{totalAborted} = $diff->{totalAborted};
    $c->stash->{totalFailed} = $diff->{totalFailed};
    $c->stash->{totalQueued} = $diff->{totalQueued};

    $c->stash->{full} = ($c->req->params->{full} || "0") eq "1";

    $self->status_ok(
        $c,
        entity => $eval
    );
}

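# The ?compare= parameter above accepts three forms (hypothetical values):
#
#   ?compare=1234      # a specific evaluation id
#   ?compare=-86400    # newest eval at least 86400 seconds older than this one
#   ?compare=staging   # the latest finished eval of jobset "staging"
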
sub errors :Chained('evalChain') :PathPart('errors') :Args(0) :ActionClass('REST') { }

sub errors_GET {
    my ($self, $c) = @_;

    $c->stash->{template} = 'eval-error.tt';

    $c->stash->{eval} = $c->model('DB::JobsetEvals')->find($c->stash->{eval}->id, { prefetch => 'evaluationerror' });

    $self->status_ok($c, entity => $c->stash->{eval});
}

sub create_jobset : Chained('evalChain') PathPart('create-jobset') Args(0) {
    my ($self, $c) = @_;
    my $eval = $c->stash->{eval};

    requireProjectOwner($c, $c->stash->{project});

    $c->stash->{template} = 'edit-jobset.tt';
    $c->stash->{createFromEval} = 1;
}


sub cancel : Chained('evalChain') PathPart('cancel') Args(0) {
    my ($self, $c) = @_;
    requireCancelBuildPrivileges($c, $c->stash->{project});
    my $n = cancelBuilds($c->model('DB')->schema, $c->stash->{eval}->builds->search_rs({}));
    $c->flash->{successMsg} = "$n builds have been cancelled.";
    $c->res->redirect($c->uri_for($c->controller('JobsetEval')->action_for('view'), $c->req->captures));
}


sub restart {
    my ($self, $c, $condition) = @_;
    requireRestartPrivileges($c, $c->stash->{project});
    my $builds = $c->stash->{eval}->builds->search_rs({ finished => 1, buildstatus => $condition });
    my $n = restartBuilds($c->model('DB')->schema, $builds);
    $c->flash->{successMsg} = "$n builds have been restarted.";
    $c->res->redirect($c->uri_for($c->controller('JobsetEval')->action_for('view'), $c->req->captures));
}


sub restart_aborted : Chained('evalChain') PathPart('restart-aborted') Args(0) {
    my ($self, $c) = @_;
    restart($self, $c, { -in => [3, 4, 9] });
}


sub restart_failed : Chained('evalChain') PathPart('restart-failed') Args(0) {
    my ($self, $c) = @_;
    restart($self, $c, { 'not in' => [0] });
}

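# Note on the two wrappers above: restart_aborted matches a fixed set of
# buildstatus codes (3, 4 and 9, aborted-style outcomes in Hydra's schema),
# while restart_failed restarts every finished build whose status is
# anything other than 0 (success).
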
sub bump : Chained('evalChain') PathPart('bump') Args(0) {
    my ($self, $c) = @_;
    requireBumpPrivileges($c, $c->stash->{project}); # FIXME: require admin?
    my $builds = $c->stash->{eval}->builds->search({ finished => 0 });
    my $n = $builds->count();
    $c->model('DB')->schema->txn_do(sub {
        $builds->update({globalpriority => time()});
    });
    $c->flash->{successMsg} = "$n builds have been bumped to the front of the queue.";
    $c->res->redirect($c->uri_for($c->controller('JobsetEval')->action_for('view'), $c->req->captures));
}


# Hydra::Base::Controller::NixChannel needs this.
sub nix : Chained('evalChain') PathPart('channel') CaptureArgs(0) {
    my ($self, $c) = @_;
    $c->stash->{channelName} = $c->stash->{project}->name . "-" . $c->stash->{jobset}->name . "-latest";
    $c->stash->{channelBuilds} = $c->stash->{eval}->builds
        ->search_literal("exists (select 1 from buildproducts where build = build.id and type = 'nix-build')")
        ->search({ finished => 1, buildstatus => 0 },
            { columns => [@buildListColumns, 'drvpath', 'description', 'homepage']
            , join => ["buildoutputs"]
            , order_by => ["build.id", "buildoutputs.name"]
            , '+select' => ['buildoutputs.path', 'buildoutputs.name'], '+as' => ['outpath', 'outname'] });
}


sub job : Chained('evalChain') PathPart('job') {
    my ($self, $c, $job, @rest) = @_;

    my $build = $c->stash->{eval}->builds->find({job => $job});

    notFound($c, "This evaluation has no job with the specified name.") unless defined $build;

    $c->res->redirect($c->uri_for($c->controller('Build')->action_for("build"), [$build->id], @rest));
}


# Return the store paths of all succeeded builds of type 'nix-build'
# (i.e. regular packages). Used by the NixOS channel scripts.
sub store_paths : Chained('evalChain') PathPart('store-paths') Args(0) {
    my ($self, $c) = @_;

    my @builds = $c->stash->{eval}->builds
        ->search_literal("exists (select 1 from buildproducts where build = build.id and type = 'nix-build')")
        ->search({ finished => 1, buildstatus => 0 },
            { columns => [], join => ["buildoutputs"]
            , '+select' => ['buildoutputs.path'], '+as' => ['outpath'] });

    $self->status_ok(
        $c,
        entity => [uniq(sort map {$_->get_column('outpath')} @builds)]
    );
}

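# Response sketch (hypothetical store hashes): a flat, de-duplicated, sorted
# JSON array of output store paths, e.g.
#
#   [ "/nix/store/0aaa...-hello-2.12", "/nix/store/1bbb...-firefox-121.0" ]
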
# Return full info about all the builds in this evaluation.
sub all_builds : Chained('evalChain') PathPart('builds') Args(0) {
    my ($self, $c) = @_;
    my @builds = $c->stash->{eval}->builds;
    $self->status_ok(
        $c,
        entity => [@builds],
    );
}


1;

@@ -1,6 +1,5 @@
package Hydra::Controller::Project;

use utf8;
use strict;
use warnings;
use base 'Hydra::Base::Controller::ListBuilds';
@@ -8,102 +7,96 @@ use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;


sub projectChain :Chained('/') :PathPart('project') :CaptureArgs(1) {
sub project : Chained('/') PathPart('project') CaptureArgs(1) {
    my ($self, $c, $projectName) = @_;
    $c->stash->{params}->{name} //= $projectName;

    my $project = $c->model('DB::Projects')->find($projectName)
        or notFound($c, "Project $projectName doesn't exist.");

    my $isCreate = $c->action->name eq "project" && $c->request->method eq "PUT";

    $c->stash->{project} = $c->model('DB::Projects')->find($projectName);

    $c->stash->{isProjectOwner} = !$isCreate && isProjectOwner($c, $c->stash->{project});

    notFound($c, "Project ‘$projectName’ doesn't exist.")
        if !$c->stash->{project} && !$isCreate;
    $c->stash->{project} = $project;
}


sub project :Chained('projectChain') :PathPart('') :Args(0) :ActionClass('REST::ForBrowsers') { }

sub project_GET {
sub view : Chained('project') PathPart('') Args(0) {
    my ($self, $c) = @_;

    $c->stash->{template} = 'project.tt';

    #getBuildStats($c, scalar $c->stash->{project}->builds);

    $c->stash->{views} = [$c->stash->{project}->views->all];
    $c->stash->{jobsets} = [jobsetOverview($c, $c->stash->{project})];

    $self->status_ok($c, entity => $c->stash->{project});
}

sub project_PUT {
    my ($self, $c) = @_;

    if (defined $c->stash->{project}) {
        requireProjectOwner($c, $c->stash->{project});

        $c->model('DB')->schema->txn_do(sub {
            updateProject($c, $c->stash->{project});
        });

        my $uri = $c->uri_for($self->action_for("project"), [$c->stash->{project}->name]) . "#tabs-configuration";
        $self->status_ok($c, entity => { redirect => "$uri" });

        $c->flash->{successMsg} = "The project configuration has been updated.";
    }

    else {
        requireMayCreateProjects($c);

        my $project;
        $c->model('DB')->schema->txn_do(sub {
            # Note: $projectName is validated in updateProject,
            # which will abort the transaction if the name isn't
            # valid. Idem for the owner.
            my $owner = $c->user->username;
            $project = $c->model('DB::Projects')->create(
                { name => ".tmp", displayname => "", owner => $owner });
            updateProject($c, $project);
        });

        my $uri = $c->uri_for($self->action_for("project"), [$project->name]);
        $self->status_created($c,
            location => "$uri",
            entity => { name => $project->name, uri => "$uri", redirect => "$uri", type => "project" });
    }
}

sub project_DELETE {
sub edit : Chained('project') PathPart Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    $c->model('DB')->schema->txn_do(sub {
        $c->stash->{project}->builds->delete;
        $c->stash->{project}->jobsets->delete;
    $c->stash->{template} = 'project.tt';
    $c->stash->{edit} = 1;
}


sub submit : Chained('project') PathPart Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});
    requirePost($c);

    txn_do($c->model('DB')->schema, sub {
        updateProject($c, $c->stash->{project});
    });

    $c->res->redirect($c->uri_for($self->action_for("view"), [$c->stash->{project}->name]));
}


sub hide : Chained('project') PathPart Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    txn_do($c->model('DB')->schema, sub {
        $c->stash->{project}->update({ hidden => 1, enabled => 0 });
    });

    $c->res->redirect($c->uri_for("/"));
}

sub unhide : Chained('project') PathPart Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    txn_do($c->model('DB')->schema, sub {
        $c->stash->{project}->update({ hidden => 0 });
    });

    $c->res->redirect($c->uri_for("/"));
}

sub delete : Chained('project') PathPart Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});
    requirePost($c);

    txn_do($c->model('DB')->schema, sub {
        $c->stash->{project}->delete;
    });

    my $uri = $c->res->redirect($c->uri_for("/"));
    $self->status_ok($c, entity => { redirect => "$uri" });

    $c->flash->{successMsg} = "The project has been deleted.";
}


sub edit : Chained('projectChain') PathPart Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    $c->stash->{template} = 'edit-project.tt';
    $c->stash->{edit} = 1;

    $c->res->redirect($c->uri_for("/"));
}


sub requireMayCreateProjects {
    my ($c) = @_;
    requireUser($c);
    accessDenied($c, "Only administrators or authorised users can perform this operation.")

    requireLogin($c) if !$c->user_exists;

    error($c, "Only administrators or authorised users can perform this operation.")
        unless $c->check_user_roles('admin') || $c->check_user_roles('create-projects');
}

@@ -113,88 +106,172 @@ sub create : Path('/create-project') {

    requireMayCreateProjects($c);

    $c->stash->{template} = 'edit-project.tt';
    $c->stash->{template} = 'project.tt';
    $c->stash->{create} = 1;
    $c->stash->{edit} = 1;
}


sub create_jobset : Chained('projectChain') PathPart('create-jobset') Args(0) {
sub create_submit : Path('/create-project/submit') {
    my ($self, $c) = @_;

    requireMayCreateProjects($c);

    my $projectName = trim $c->request->params->{name};

    txn_do($c->model('DB')->schema, sub {
        # Note: $projectName is validated in updateProject,
        # which will abort the transaction if the name isn't
        # valid. Idem for the owner.
        my $owner = $c->check_user_roles('admin')
            ? trim $c->request->params->{owner} : $c->user->username;
        my $project = $c->model('DB::Projects')->create(
            {name => $projectName, displayname => "", owner => $owner});
        updateProject($c, $project);
    });

    $c->res->redirect($c->uri_for($self->action_for("view"), [$projectName]));
}


sub create_jobset : Chained('project') PathPart('create-jobset') Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    $c->stash->{template} = 'edit-jobset.tt';

    $c->stash->{template} = 'jobset.tt';
    $c->stash->{create} = 1;
    $c->stash->{totalShares} = getTotalShares($c->model('DB')->schema);
    $c->stash->{emailNotification} = $c->config->{email_notification} // 0;
    $c->stash->{edit} = 1;
}


sub create_jobset_submit : Chained('project') PathPart('create-jobset/submit') Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    my $jobsetName = trim $c->request->params->{name};

    txn_do($c->model('DB')->schema, sub {
        # Note: $jobsetName is validated in updateProject, which will
        # abort the transaction if the name isn't valid.
        my $jobset = $c->stash->{project}->jobsets->create(
            {name => $jobsetName, nixexprinput => "", nixexprpath => "", emailoverride => ""});
        Hydra::Controller::Jobset::updateJobset($c, $jobset);
    });

    $c->res->redirect($c->uri_for($c->controller('Jobset')->action_for("index"),
        [$c->stash->{project}->name, $jobsetName]));
}


sub updateProject {
    my ($c, $project) = @_;
    my $projectName = trim $c->request->params->{name};
    error($c, "Invalid project name: " . ($projectName || "(empty)")) unless $projectName =~ /^[[:alpha:]][\w\-]*$/;

    my $displayName = trim $c->request->params->{displayname};
    error($c, "Invalid display name: $displayName") if $displayName eq "";

    my $owner = $project->owner;
    if ($c->check_user_roles('admin') and defined $c->stash->{params}->{owner}) {
        $owner = trim $c->stash->{params}->{owner};
        badRequest($c, "The user name ‘$owner’ does not exist.")
            unless defined $c->model('DB::Users')->find($owner);
    }

    my $projectName = $c->stash->{params}->{name};
    error($c, "Invalid project identifier ‘$projectName’.") if $projectName !~ /^$projectNameRE$/;

    error($c, "Cannot rename project to ‘$projectName’ since that identifier is already taken.")
        if $projectName ne $project->name && defined $c->model('DB::Projects')->find($projectName);

    my $displayName = trim $c->stash->{params}->{displayname};
    error($c, "You must specify a display name.") if $displayName eq "";

    my $enable_dynamic_run_command = defined $c->stash->{params}->{enable_dynamic_run_command} ? 1 : 0;
    if ($enable_dynamic_run_command && !$c->config->{dynamicruncommand}->{enable}) {
        badRequest($c, "Dynamic RunCommand is not enabled by the server.");
    if ($c->check_user_roles('admin')) {
        $owner = trim $c->request->params->{owner};
        error($c, "Invalid owner: $owner")
            unless defined $c->model('DB::Users')->find({username => $owner});
    }

    $project->update(
        { name => $projectName
        , displayname => $displayName
        , description => trim($c->stash->{params}->{description})
        , homepage => trim($c->stash->{params}->{homepage})
        , enabled => defined $c->stash->{params}->{enabled} ? 1 : 0
        , hidden => defined $c->stash->{params}->{visible} ? 0 : 1
        , description => trim($c->request->params->{description})
        , homepage => trim($c->request->params->{homepage})
        , enabled => trim($c->request->params->{enabled}) eq "1" ? 1 : 0
        , owner => $owner
        , enable_dynamic_run_command => $enable_dynamic_run_command
        , declfile => trim($c->stash->{params}->{declarative}->{file})
        , decltype => trim($c->stash->{params}->{declarative}->{type})
        , declvalue => trim($c->stash->{params}->{declarative}->{value})
        });
    if (length($project->declfile)) {
        # This logic also exists in the DeclarativeJobsets tests.
        # TODO: refactor and deduplicate.
        $project->jobsets->update_or_create(
            { name => ".jobsets"
            , nixexprinput => ""
            , nixexprpath => ""
            , emailoverride => ""
            , triggertime => time
            });
    } else {
        $project->jobsets->search({ name => ".jobsets" })->delete;
        $project->update(
            { decltype => ""
            , declvalue => ""
            });
    }
}

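# For context: when declfile is non-empty, the update above materialises a
# hidden ".jobsets" jobset whose evaluation declaratively produces the
# project's real jobsets; clearing declfile deletes that jobset again and
# resets the declarative type/value columns.
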
# Hydra::Base::Controller::ListBuilds needs this.
sub get_builds : Chained('projectChain') PathPart('') CaptureArgs(0) {
sub get_builds : Chained('project') PathPart('') CaptureArgs(0) {
    my ($self, $c) = @_;
    $c->stash->{allBuilds} = $c->stash->{project}->builds;
    $c->stash->{jobStatus} = $c->model('DB')->resultset('JobStatusForProject')
        ->search({}, {bind => [$c->stash->{project}->name]});
    $c->stash->{allJobsets} = $c->stash->{project}->jobsets;
    $c->stash->{allJobs} = $c->stash->{project}->jobs;
    $c->stash->{latestSucceeded} = $c->model('DB')->resultset('LatestSucceededForProject')
        ->search({}, {bind => [$c->stash->{project}->name]});
    $c->stash->{channelBaseName} = $c->stash->{project}->name;
}


sub create_view_submit : Chained('project') PathPart('create-view/submit') Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    my $viewName = $c->request->params->{name};

    my $view;
    txn_do($c->model('DB')->schema, sub {
        # Note: $viewName is validated in updateView, which will abort
        # the transaction if the name isn't valid.
        $view = $c->stash->{project}->views->create({name => $viewName});
        Hydra::Controller::View::updateView($c, $view);
    });

    $c->res->redirect($c->uri_for($c->controller('View')->action_for('view_view'),
        [$c->stash->{project}->name, $view->name]));
}


sub create_view : Chained('project') PathPart('create-view') Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    $c->stash->{template} = 'edit-view.tt';
    $c->stash->{create} = 1;
}


sub releases : Chained('project') PathPart('releases') Args(0) {
    my ($self, $c) = @_;
    $c->stash->{template} = 'releases.tt';
    $c->stash->{releases} = [$c->stash->{project}->releases->search({},
        {order_by => ["timestamp DESC"]})];
}


sub create_release : Chained('project') PathPart('create-release') Args(0) {
    my ($self, $c) = @_;
    requireProjectOwner($c, $c->stash->{project});
    $c->stash->{template} = 'edit-release.tt';
    $c->stash->{create} = 1;
}


sub create_release_submit : Chained('project') PathPart('create-release/submit') Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    my $releaseName = $c->request->params->{name};

    my $release;
    txn_do($c->model('DB')->schema, sub {
        # Note: $releaseName is validated in updateRelease, which will
        # abort the transaction if the name isn't valid.
        $release = $c->stash->{project}->releases->create(
            { name => $releaseName
            , timestamp => time
            });
        Hydra::Controller::Release::updateRelease($c, $release);
    });

    $c->res->redirect($c->uri_for($c->controller('Release')->action_for('view'),
        [$c->stash->{project}->name, $release->name]));
}


1;

79 src/lib/Hydra/Controller/Release.pm Normal file
@@ -0,0 +1,79 @@
package Hydra::Controller::Release;

use strict;
use warnings;
use base 'Catalyst::Controller';
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;


sub release : Chained('/') PathPart('release') CaptureArgs(2) {
    my ($self, $c, $projectName, $releaseName) = @_;

    $c->stash->{project} = $c->model('DB::Projects')->find($projectName)
        or notFound($c, "Project $projectName doesn't exist.");

    $c->stash->{release} = $c->stash->{project}->releases->find({name => $releaseName})
        or notFound($c, "Release $releaseName doesn't exist.");
}


sub view : Chained('release') PathPart('') Args(0) {
    my ($self, $c) = @_;
    $c->stash->{template} = 'release.tt';
    $c->stash->{members} = [$c->stash->{release}->releasemembers->search({},
        {order_by => ["description"]})];
}


sub updateRelease {
    my ($c, $release) = @_;

    my $releaseName = trim $c->request->params->{name};
    error($c, "Invalid release name: $releaseName")
        unless $releaseName =~ /^$relNameRE$/;

    $release->update(
        { name => $releaseName
        , description => trim $c->request->params->{description}
        });

    $release->releasemembers->delete_all;
    foreach my $param (keys %{$c->request->params}) {
        next unless $param =~ /^member-(\d+)-description$/;
        my $buildId = $1;
        my $description = trim $c->request->params->{"member-$buildId-description"};
        $release->releasemembers->create({ build => $buildId, description => $description });
    }
}

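# Form sketch: release members arrive as numbered description fields keyed
# by build id (hypothetical ids), which the loop above turns into
# ReleaseMembers rows:
#
#   name=v1.0&description=First+stable
#   &member-1234-description=hello-2.12
#   &member-1250-description=firefox-121.0
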
sub edit : Chained('release') PathPart('edit') Args(0) {
    my ($self, $c) = @_;
    requireProjectOwner($c, $c->stash->{project});
    $c->stash->{template} = 'edit-release.tt';
}


sub submit : Chained('release') PathPart('submit') Args(0) {
    my ($self, $c) = @_;

    requireProjectOwner($c, $c->stash->{project});

    if (($c->request->params->{action} || "") eq "delete") {
        txn_do($c->model('DB')->schema, sub {
            $c->stash->{release}->delete;
        });
        $c->res->redirect($c->uri_for($c->controller('Project')->action_for('releases'),
            [$c->stash->{project}->name]));
    } else {
        txn_do($c->model('DB')->schema, sub {
            updateRelease($c, $c->stash->{release});
        });
        $c->res->redirect($c->uri_for($self->action_for("view"),
            [$c->stash->{project}->name, $c->stash->{release}->name]));
    }
}


1;
@@ -1,288 +1,105 @@
package Hydra::Controller::Root;

use utf8;
use strict;
use warnings;
use base 'Hydra::Base::Controller::ListBuilds';
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
use Hydra::View::TT;
use Digest::SHA1 qw(sha1_hex);
use Nix::Store;
use Nix::Config;
use Number::Bytes::Human qw(format_bytes);
use Encode;
use File::Basename;
use JSON::MaybeXS;
use HTML::Entities;
use IPC::Run3;
use List::Util qw[min max];
use List::SomeUtils qw{any};
use Net::Prometheus;
use Types::Standard qw/StrMatch/;
use WWW::Form::UrlEncoded::PP qw();

use constant NARINFO_REGEX => qr{^([a-z0-9]{32})\.narinfo$};
|
||||
# e.g.: https://hydra.example.com/realisations/sha256:a62128132508a3a32eef651d6467695944763602f226ac630543e947d9feb140!out.doi
|
||||
use constant REALISATIONS_REGEX => qr{^(sha256:[a-z0-9]{64}![a-z]+)\.doi$};
|
||||
|
||||
# Put this controller at top-level.
|
||||
__PACKAGE__->config->{namespace} = '';
|
||||
|
||||
|
||||
sub noLoginNeeded {
|
||||
my ($c) = @_;
|
||||
|
||||
my $hostname = $c->request->headers->header('X-Forwarded-For') || $c->request->hostname;
|
||||
my $readonly_ips = $c->config->{readonly_ips} // "";
|
||||
my $whitelisted = any { $_ eq $hostname } split(/,/, $readonly_ips);
|
||||
|
||||
return $whitelisted ||
|
||||
$c->request->path eq "api/push-github" ||
|
||||
$c->request->path eq "api/push-gitea" ||
|
||||
$c->request->path eq "google-login" ||
|
||||
$c->request->path eq "github-redirect" ||
|
||||
$c->request->path eq "github-login" ||
|
||||
$c->request->path eq "login" ||
|
||||
$c->request->path eq "logo" ||
|
||||
$c->request->path =~ /^static\//;
|
||||
}
|
||||
|
||||
|
||||
sub begin :Private {
|
||||
my ($self, $c, @args) = @_;
|
||||
|
||||
$c->stash->{curUri} = $c->request->uri;
|
||||
$c->stash->{version} = $ENV{"HYDRA_RELEASE"} || "<devel>";
|
||||
$c->stash->{nixVersion} = $ENV{"NIX_RELEASE"} || "<devel>";
|
||||
$c->stash->{nixEvalJobsVersion} = $ENV{"NIX_EVAL_JOBS_RELEASE"} || "<devel>";
|
||||
$c->stash->{curTime} = time;
|
||||
$c->stash->{logo} = defined $c->config->{hydra_logo} ? "/logo" : "";
|
||||
$c->stash->{tracker} = defined $c->config->{tracker} ? $c->config->{tracker} : "";
|
||||
$c->stash->{flashMsg} = $c->flash->{flashMsg};
|
||||
$c->stash->{successMsg} = $c->flash->{successMsg};
|
||||
$c->stash->{localStore} = isLocalStore;
|
||||
|
||||
$c->stash->{isPrivateHydra} = $c->config->{private} // "0" ne "0";
|
||||
|
||||
if ($c->stash->{isPrivateHydra} && ! noLoginNeeded($c)) {
|
||||
requireUser($c);
|
||||
}
|
||||
$c->stash->{logo} = $ENV{"HYDRA_LOGO"} ? "/logo" : "/static/images/hydra.png" ;
|
||||
$c->stash->{tracker} = $ENV{"HYDRA_TRACKER"} ;
|
||||
|
||||
if (scalar(@args) == 0 || $args[0] ne "static") {
|
||||
$c->stash->{nrRunningBuilds} = dbh($c)->selectrow_array(
|
||||
"select count(distinct build) from buildsteps where busy != 0");
|
||||
$c->stash->{nrQueuedBuilds} = $c->model('DB::Builds')->search({ finished => 0 })->count();
|
||||
$c->stash->{nrRunningBuilds} = $c->model('DB::Builds')->search({ finished => 0, busy => 1 }, {})->count();
|
||||
$c->stash->{nrQueuedBuilds} = $c->model('DB::Builds')->search({ finished => 0 })->count();
|
||||
}
|
||||
|
||||
# Gather the supported input types.
|
||||
$c->stash->{inputTypes} = {
|
||||
'string' => 'String value',
|
||||
'boolean' => 'Boolean',
|
||||
'nix' => 'Nix expression',
|
||||
'build' => 'Previous Hydra build',
|
||||
'sysbuild' => 'Previous Hydra build (same system)',
|
||||
'eval' => 'Previous Hydra evaluation'
|
||||
};
|
||||
$_->supportedInputTypes($c->stash->{inputTypes}) foreach @{$c->hydra_plugins};
|
||||
|
||||
# XSRF protection: require POST requests to have the same origin.
|
||||
if ($c->req->method eq "POST" && $c->req->path ne "api/push-github" && $c->req->path ne "api/push-gitea") {
|
||||
my $referer = $c->req->header('Referer');
|
||||
$referer //= $c->req->header('Origin');
|
||||
my $base = $c->req->base;
|
||||
die unless $base =~ /\/$/;
|
||||
$referer .= "/";
|
||||
error($c, "POST requests should come from ‘$base’.")
|
||||
unless defined $referer && substr($referer, 0, length $base) eq $base;
|
||||
}
|
||||
|
||||
$c->forward('deserialize');
|
||||
|
||||
$c->stash->{params} = $c->request->data or $c->request->params;
|
||||
unless (defined $c->stash->{params} and %{$c->stash->{params}}) {
|
||||
$c->stash->{params} = $c->request->params;
|
||||
}
|
||||
|
||||
# Set the Vary header to "Accept" to ensure that browsers don't
|
||||
# mix up HTML and JSON responses.
|
||||
$c->response->headers->header('Vary', 'Accept');
|
||||
}
|
||||
|
||||
|
||||
sub deserialize :ActionClass('Deserialize') { }
|
||||
|
||||
|
||||
sub index :Path :Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
$c->stash->{template} = 'overview.tt';
|
||||
$c->stash->{projects} = [$c->model('DB::Projects')->search({}, {order_by => ['enabled DESC', 'name']})];
|
||||
$c->stash->{projects} = [$c->model('DB::Projects')->search(isAdmin($c) ? {} : {hidden => 0}, {order_by => 'name'})];
|
||||
$c->stash->{newsItems} = [$c->model('DB::NewsItems')->search({}, { order_by => ['createtime DESC'], rows => 5 })];
|
||||
$self->status_ok($c,
|
||||
entity => $c->stash->{projects}
|
||||
);
|
||||
# $c->stash->{nrbuilds} = [nrbuildsQuery($c, 30, "day", "", "", "", "")];
|
||||
# <img src="http://chart.apis.google.com/chart?cht=bvg&chtt=Nr%20builds%20over%20the%20last%2030%20days&chs=300x100&chd=t:1785,881,2863,2828,1472,2847,1449,5634,1625,1200,1576,700,839,8533,1439,361,991,1337,1234,1322,1883,2146,1553,883,378,1395,1204,527,1147,124&chco=BBCEBB&chds=0,8533&chbh=a&chxt=y&chxr=0,0,8533"/>
|
||||
}
|
||||
|
||||
|
||||
sub queue :Local :Args(0) :ActionClass('REST') { }
|
||||
sub login :Local {
|
||||
my ($self, $c) = @_;
|
||||
|
||||
sub queue_GET {
|
||||
my $username = $c->request->params->{username} || "";
|
||||
my $password = $c->request->params->{password} || "";
|
||||
|
||||
if ($username eq "" && $password eq "" && ! defined $c->flash->{referer}) {
|
||||
my $baseurl = $c->uri_for('/');
|
||||
my $refurl = $c->request->referer;
|
||||
$c->flash->{referer} = $refurl if $refurl =~ m/^($baseurl)/;
|
||||
}
|
||||
|
||||
if ($username && $password) {
|
||||
if ($c->authenticate({username => $username, password => $password})) {
|
||||
$c->response->redirect($c->flash->{referer} || $c->uri_for('/'));
|
||||
$c->flash->{referer} = undef;
|
||||
return;
|
||||
}
|
||||
$c->stash->{errorMsg} = "Bad username or password.";
|
||||
}
|
||||
|
||||
$c->stash->{template} = 'login.tt';
|
||||
}
|
||||
|
||||
|
||||
sub logout :Local {
|
||||
my ($self, $c) = @_;
|
||||
$c->logout;
|
||||
$c->response->redirect($c->request->referer || $c->uri_for('/'));
|
||||
}
|
||||
|
||||
|
||||
sub queue :Local {
|
||||
my ($self, $c) = @_;
|
||||
$c->stash->{template} = 'queue.tt';
|
||||
$c->stash->{flashMsg} //= $c->flash->{buildMsg};
|
||||
$self->status_ok(
|
||||
$c,
|
||||
entity => [$c->model('DB::Builds')->search(
|
||||
{ finished => 0 },
|
||||
{ order_by => ["globalpriority desc", "id"],
|
||||
, columns => [@buildListColumns]
|
||||
})]
|
||||
);
|
||||
$c->stash->{queue} = [$c->model('DB::Builds')->search(
|
||||
{finished => 0}, { join => ['project'], order_by => ["priority DESC", "timestamp"], columns => [@buildListColumns], '+select' => ['project.enabled'], '+as' => ['enabled'] })];
|
||||
$c->stash->{flashMsg} = $c->flash->{buildMsg};
|
||||
}
|
||||
|
||||
|
||||
sub queue_summary :Local :Path('queue-summary') :Args(0) {
|
||||
sub timeline :Local {
|
||||
my ($self, $c) = @_;
|
||||
$c->stash->{template} = 'queue-summary.tt';
|
||||
my $pit = time();
|
||||
$c->stash->{pit} = $pit;
|
||||
$pit = $pit-(24*60*60)-1;
|
||||
|
||||
$c->stash->{queued} = dbh($c)->selectall_arrayref(
|
||||
"select jobsets.project as project, jobsets.name as jobset, count(*) as queued, min(timestamp) as oldest, max(timestamp) as newest from Builds " .
|
||||
"join Jobsets jobsets on jobsets.id = builds.jobset_id " .
|
||||
"where finished = 0 group by jobsets.project, jobsets.name order by queued desc",
|
||||
{ Slice => {} });
|
||||
|
||||
$c->stash->{systems} = dbh($c)->selectall_arrayref(
|
||||
"select system, count(*) as c from Builds where finished = 0 group by system order by c desc",
|
||||
{ Slice => {} });
|
||||
$c->stash->{template} = 'timeline.tt';
|
||||
$c->stash->{builds} = [ $c->model('DB::Builds')->search
|
||||
( { finished => 1, stoptime => { '>' => $pit } }
|
||||
, { order_by => ["starttime"] }
|
||||
) ];
|
||||
}
|
||||
|
||||
|
||||
sub status :Local :Args(0) :ActionClass('REST') { }
|
||||
|
||||
sub status_GET {
|
||||
sub status :Local {
|
||||
my ($self, $c) = @_;
|
||||
$self->status_ok(
|
||||
$c,
|
||||
entity => [$c->model('DB::Builds')->search(
|
||||
{ "buildsteps.busy" => { '!=', 0 } },
|
||||
{ order_by => ["globalpriority DESC", "id"],
|
||||
join => "buildsteps",
|
||||
columns => [@buildListColumns, 'buildsteps.drvpath', 'buildsteps.type']
|
||||
})]
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
sub queue_runner_status :Local :Path('queue-runner-status') :Args(0) :ActionClass('REST') { }
|
||||
|
||||
sub queue_runner_status_GET {
|
||||
my ($self, $c) = @_;
|
||||
|
||||
#my $status = from_json($c->model('DB::SystemStatus')->find('queue-runner')->status);
|
||||
my ($stdout, $stderr);
|
||||
run3(['hydra-queue-runner', '--status'], \undef, \$stdout, \$stderr);
|
||||
my $status;
|
||||
if ($? != 0) {
|
||||
$status = { status => "unknown" };
|
||||
} else {
|
||||
$status = decode_json($stdout);
|
||||
}
|
||||
my $json = JSON->new->pretty()->canonical();
|
||||
|
||||
$c->stash->{template} = 'queue-runner-status.tt';
|
||||
$c->stash->{status} = $json->encode($status);
|
||||
$self->status_ok($c, entity => $status);
|
||||
}
|
||||
|
||||
|
||||
sub machines :Local Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
my $machines = getMachines;
|
||||
|
||||
# Add entry for localhost. The implicit addition is not needed with queue runner v2
|
||||
if (not $c->config->{'queue_runner_endpoint'}) {
|
||||
$machines->{''} //= {};
|
||||
}
|
||||
delete $machines->{'localhost'};
|
||||
|
||||
my $status = $c->model('DB::SystemStatus')->find("queue-runner");
|
||||
if ($status) {
|
||||
my $ms = decode_json($status->status)->{"machines"};
|
||||
foreach my $name (keys %{$ms}) {
|
||||
$name = "" if $name eq "localhost";
|
||||
my $outName = $name;
|
||||
$outName = "" if $name eq "ssh://localhost";
|
||||
$machines->{$outName} //= {disabled => 1};
|
||||
$machines->{$outName}->{nrStepsDone} = $ms->{$name}->{nrStepsDone};
|
||||
$machines->{$outName}->{avgStepBuildTime} = $ms->{$name}->{avgStepBuildTime} // 0;
|
||||
}
|
||||
}
|
||||
|
||||
$c->stash->{machines} = $machines;
|
||||
$c->stash->{steps} = dbh($c)->selectall_arrayref(
|
||||
"select build, stepnr, s.system as system, s.drvpath as drvpath, machine, s.starttime as starttime, jobsets.project as project, jobsets.name as jobset, job, s.busy as busy " .
|
||||
"from BuildSteps s " .
|
||||
"join Builds b on s.build = b.id " .
|
||||
"join Jobsets jobsets on jobsets.id = b.jobset_id " .
|
||||
"where busy != 0 order by machine, stepnr",
|
||||
{ Slice => {} });
|
||||
$c->stash->{template} = 'machine-status.tt';
|
||||
$c->stash->{human_bytes} = sub {
|
||||
my ($bytes) = @_;
|
||||
return format_bytes($bytes, si => 1);
|
||||
};
|
||||
$c->stash->{pretty_load} = sub {
|
||||
my ($load) = @_;
|
||||
return sprintf('%.2f', $load);
|
||||
};
|
||||
$c->stash->{pretty_percent} = sub {
|
||||
my ($percent) = @_;
|
||||
my $ret = sprintf('%.2f', $percent);
|
||||
return (' ' x (6 - length($ret))) . encode_entities($ret);
|
||||
};
|
||||
$self->status_ok($c, entity => $c->stash->{machines});
|
||||
}
|
||||
|
||||
sub prometheus :Local Args(0) {
|
||||
my ($self, $c) = @_;
|
||||
my $machines = getMachines;
|
||||
|
||||
my $client = Net::Prometheus->new;
|
||||
my $duration = $client->new_histogram(
|
||||
name => "hydra_machine_build_duration",
|
||||
help => "How long builds are taking per server. Note: counts are gauges, NOT counters.",
|
||||
labels => [ "machine" ],
|
||||
buckets => [
|
||||
60,
|
||||
600,
|
||||
1800,
|
||||
3600,
|
||||
7200,
|
||||
21600,
|
||||
43200,
|
||||
86400,
|
||||
172800,
|
||||
259200,
|
||||
345600,
|
||||
518400,
|
||||
604800,
|
||||
691200
|
||||
]
|
||||
);
|
||||
|
||||
my $steps = dbh($c)->selectall_arrayref(
|
||||
"select machine, s.starttime as starttime " .
|
||||
"from BuildSteps s join Builds b on s.build = b.id " .
|
||||
"where busy != 0 order by machine, stepnr",
|
||||
{ Slice => {} });
|
||||
|
||||
foreach my $step (@$steps) {
|
||||
my $name = $step->{machine} ? Hydra::View::TT->stripSSHUser(undef, $step->{machine}) : "";
|
||||
$name = "localhost" unless $name;
|
||||
$duration->labels($name)->observe(time - $step->{starttime});
|
||||
}
|
||||
|
||||
$c->stash->{'plain'} = { data => $client->render };
|
||||
$c->forward('Hydra::View::Plain');
|
||||
$c->stash->{steps} = [ $c->model('DB::BuildSteps')->search(
|
||||
{ 'me.busy' => 1, 'build.finished' => 0, 'build.busy' => 1 },
|
||||
{ join => [ 'build' ]
|
||||
, order_by => [ 'machine' ]
|
||||
} ) ];
|
||||
}
|
||||
|
||||
|
||||
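The XSRF guard in `begin` above boils down to a string-prefix test against the request base; a standalone sketch of just that check (the helper name `same_origin` is ours, not Hydra's):

    use strict;
    use warnings;

    # A POST passes only if its Referer (or Origin) starts with the server's
    # base URL. The base always ends in "/", and "/" is appended to the
    # referer so that a prefix match cannot succeed on a truncated host name.
    sub same_origin {
        my ($referer, $base) = @_;
        return 0 unless defined $referer;
        die "base must end in /" unless $base =~ m{/$};
        $referer .= "/";
        return substr($referer, 0, length $base) eq $base;
    }

    print same_origin('https://hydra.example.com/jobset/foo', 'https://hydra.example.com/') ? "ok\n" : "reject\n";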
@@ -290,15 +107,58 @@ sub prometheus :Local Args(0) {

sub get_builds : Chained('/') PathPart('') CaptureArgs(0) {
    my ($self, $c) = @_;
    $c->stash->{allBuilds} = $c->model('DB::Builds');
    $c->stash->{jobStatus} = $c->model('DB')->resultset('JobStatus');
    $c->stash->{allJobsets} = $c->model('DB::Jobsets');
    $c->stash->{allJobs} = $c->model('DB::Jobs');
    $c->stash->{latestSucceeded} = $c->model('DB')->resultset('LatestSucceeded');
    $c->stash->{channelBaseName} = "everything";
    $c->stash->{total} = $c->model('DB::NrBuilds')->find('finished')->count;
}


sub robots_txt : Path('robots.txt') {
    my ($self, $c) = @_;
    $c->stash->{'plain'} = { data => "User-agent: *\nDisallow: /*\n" };

    sub uri_for {
        my ($controller, $action, @args) = @_;
        return $c->uri_for($c->controller($controller)->action_for($action), @args)->path;
    }

    sub channelUris {
        my ($controller, $bindings) = @_;
        return
            ( uri_for($controller, 'closure', $bindings, "*")
            , uri_for($controller, 'manifest', $bindings)
            , uri_for($controller, 'pkg', $bindings, "*")
            , uri_for($controller, 'nixexprs', $bindings)
            , uri_for($controller, 'channel_contents', $bindings)
            );
    }

    # Put actions that are expensive or not useful for indexing in
    # robots.txt. Note: wildcards are not universally supported in
    # robots.txt, but apparently Google supports them.
    my @rules =
        ( uri_for('Build', 'buildtimedeps', ["*"])
        , uri_for('Build', 'runtimedeps', ["*"])
        , uri_for('Build', 'deps', ["*"])
        , uri_for('Build', 'view_nixlog', ["*"], "*")
        , uri_for('Build', 'view_log', ["*"], "*")
        , uri_for('Build', 'view_log', ["*"])
        , uri_for('Build', 'download', ["*"], "*")
        , uri_for('Root', 'nar', [], "*")
        , uri_for('Root', 'status', [])
        , uri_for('Root', 'all', [])
        , uri_for('API', 'scmdiff', [])
        , uri_for('API', 'logdiff', [], "*", "*")
        , uri_for('Project', 'all', ["*"])
        , channelUris('Root', ["*"])
        , channelUris('Project', ["*", "*"])
        , channelUris('Jobset', ["*", "*", "*"])
        , channelUris('Job', ["*", "*", "*", "*"])
        , channelUris('Build', ["*"])
        );

    $c->stash->{'plain'} = { data => "User-agent: *\n" . join('', map { "Disallow: $_\n" } @rules) };
    $c->forward('Hydra::View::Plain');
}
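For reference, the blanket-deny variant of `robots_txt` above produces exactly this response body:

    User-agent: *
    Disallow: /*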
@@ -312,300 +172,62 @@ sub default :Path {

sub end : ActionClass('RenderView') {
    my ($self, $c) = @_;

    if (defined $c->stash->{json}) {
        if (scalar @{$c->error}) {
            # FIXME: dunno why we need to do decode_utf8 here.
            $c->stash->{json}->{error} = join "\n", map { decode_utf8($_); } @{$c->error};
            $c->clear_errors;
        }
        $c->forward('View::JSON');
    }

    elsif (scalar @{$c->error}) {
        $c->stash->{resource} = { error => join "\n", @{$c->error} };
        if ($c->stash->{lazy}) {
            $c->response->headers->header('X-Hydra-Lazy', 'Yes');
            $c->stash->{template} = 'lazy_error.tt';
        }
        else {
            $c->stash->{template} = 'error.tt';
        }
    if (scalar @{$c->error}) {
        $c->stash->{template} = 'error.tt';
        $c->stash->{errors} = $c->error;
        $c->response->status(500) if $c->response->status == 200;
        if ($c->response->status >= 300) {
            $c->stash->{httpStatus} =
                $c->response->status . " " . HTTP::Status::status_message($c->response->status);
        }
        $c->clear_errors;
    }

    $c->forward('serialize') if defined $c->stash->{resource};
}


sub serialize : ActionClass('Serialize') { }


sub nar :Local :Args(1) {
    my ($self, $c, $path) = @_;

    die if $path =~ /\//;
    $path = ($ENV{NIX_STORE_DIR} || "/nix/store")."/$path";

    if (!isLocalStore) {
        notFound($c, "There is no binary cache here.");
    if (!isValidPath($path)) {
        $c->response->status(410); # "Gone"
        error($c, "Path " . $path . " is no longer available.");
    }

    else {
        $path = $Nix::Config::storeDir . "/$path";

        gone($c, "Path " . $path . " is no longer available.") unless $MACHINE_LOCAL_STORE->isValidPath($path);

        $c->stash->{current_view} = 'NixNAR';
        $c->stash->{storePath} = $path;
    }
    $c->stash->{current_view} = 'NixNAR';
    $c->stash->{storePath} = $path;
}


sub nix_cache_info :Path('nix-cache-info') :Args(0) {
sub change_password : Path('change-password') : Args(0) {
    my ($self, $c) = @_;

    if (!isLocalStore) {
        notFound($c, "There is no binary cache here.");
    }
    requireLogin($c) if !$c->user_exists;

    else {
        $c->response->content_type('text/plain');
        $c->stash->{plain}->{data} =
            "StoreDir: $Nix::Config::storeDir\n" .
            "WantMassQuery: 0\n" .
            # Give Hydra binary caches a very low priority (lower than the
            # static binary cache http://nixos.org/binary-cache).
            "Priority: 100\n";
        setCacheHeaders($c, 24 * 60 * 60);
        $c->forward('Hydra::View::Plain');
    }
    $c->stash->{template} = 'change-password.tt';
}

sub change_password_submit : Path('change-password/submit') : Args(0) {
    my ($self, $c) = @_;

sub realisations :Path('realisations') :Args(StrMatch[REALISATIONS_REGEX]) {
    my ($self, $c, $realisation) = @_;
    requireLogin($c) if !$c->user_exists;

    if (!isLocalStore) {
        notFound($c, "There is no binary cache here.");
    }
    my $password = $c->request->params->{"password"};
    my $password_check = $c->request->params->{"password_check"};
    print STDERR "$password \n";
    print STDERR "$password_check \n";
    error($c, "Passwords did not match, go back and try again!") if $password ne $password_check;

    else {
        my ($rawDrvOutput) = $realisation =~ REALISATIONS_REGEX;
        my $rawRealisation = $MACHINE_LOCAL_STORE->queryRawRealisation($rawDrvOutput);
        my $hashed = sha1_hex($password);
        $c->user->update({ password => $hashed });

        if (!$rawRealisation) {
            $c->response->status(404);
            $c->response->content_type('text/plain');
            $c->stash->{plain}->{data} = "does not exist\n";
            $c->forward('Hydra::View::Plain');
            setCacheHeaders($c, 60 * 60);
            return;
        }

        $c->response->content_type('text/plain');
        $c->stash->{plain}->{data} = $rawRealisation;
        $c->forward('Hydra::View::Plain');
    }
    $c->res->redirect("/");
}


sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
    my ($self, $c, $narinfo) = @_;

    if (!isLocalStore) {
        notFound($c, "There is no binary cache here.");
    }

    else {
        my ($hash) = $narinfo =~ NARINFO_REGEX;

        die("Hash length was not 32") if length($hash) != 32;
        my $path = $MACHINE_LOCAL_STORE->queryPathFromHashPart($hash);

        if (!$path) {
            $c->response->status(404);
            $c->response->content_type('text/plain');
            $c->stash->{plain}->{data} = "does not exist\n";
            $c->forward('Hydra::View::Plain');
            setCacheHeaders($c, 60 * 60);
            return;
        }

        $c->stash->{storePath} = $path;
        $c->forward('Hydra::View::NARInfo');
    }
}


sub logo :Local {
    my ($self, $c) = @_;
    my $path = $c->config->{hydra_logo} // die("Logo not set!");
    my $path = $ENV{"HYDRA_LOGO"} or die("Logo not set!");
    $c->serve_static_file($path);
}


sub evals :Local Args(0) {
    my ($self, $c) = @_;

    $c->stash->{template} = 'evals.tt';

    my $page = int($c->req->param('page') || "1") || 1;

    my $resultsPerPage = 20;

    my $evals = $c->model('DB::JobsetEvals');

    $c->stash->{page} = $page;
    $c->stash->{resultsPerPage} = $resultsPerPage;
    $c->stash->{total} = $evals->search({hasnewbuilds => 1})->count;
    $c->stash->{evals} = getEvals($c, $evals, ($page - 1) * $resultsPerPage, $resultsPerPage);

    $self->status_ok($c, entity => $c->stash->{evals});
}


sub steps :Local Args(0) {
    my ($self, $c) = @_;

    $c->stash->{template} = 'steps.tt';

    my $page = int($c->req->param('page') || "1") || 1;

    my $resultsPerPage = 20;

    $c->stash->{page} = $page;
    $c->stash->{resultsPerPage} = $resultsPerPage;
    $c->stash->{steps} = [ $c->model('DB::BuildSteps')->search(
        { starttime => { '!=', undef },
          stoptime => { '!=', undef }
        },
        { order_by => [ "stoptime desc" ],
          rows => $resultsPerPage,
          offset => ($page - 1) * $resultsPerPage
        }) ];

    $c->stash->{total} = approxTableSize($c, "IndexBuildStepsOnStopTime");
}


sub search :Local Args(0) {
    my ($self, $c) = @_;
    $c->stash->{template} = 'search.tt';

    my $query = trim $c->request->params->{"query"};

    error($c, "Query is empty.") if $query eq "";
    error($c, "Invalid character in query.")
        unless $query =~ /^[a-zA-Z0-9_\-\/.]+$/;

    my $limit = int(trim($c->request->params->{"limit"} || "10"));
    $c->stash->{limit} = min(50, max(1, $limit));

    $c->model('DB')->schema->txn_do(sub {
        $c->model('DB')->schema->storage->dbh->do("SET LOCAL statement_timeout = 20000");
        $c->stash->{projects} = [ $c->model('DB::Projects')->search(
            { -and =>
                [ { -or => [ name => { ilike => "%$query%" }, displayName => { ilike => "%$query%" }, description => { ilike => "%$query%" } ] }
                , { hidden => 0 }
                ]
            },
            { order_by => ["name"] } ) ];

        $c->stash->{jobsets} = [ $c->model('DB::Jobsets')->search(
            { -and =>
                [ { -or => [ "me.name" => { ilike => "%$query%" }, "me.description" => { ilike => "%$query%" } ] }
                , { "project.hidden" => 0, "me.hidden" => 0 }
                ]
            },
            { order_by => ["project", "name"], join => ["project"] } ) ];

        $c->stash->{jobs} = [ $c->model('DB::Builds')->search(
            { "job" => { ilike => "%$query%" }
            , "project.hidden" => 0
            , "jobset.hidden" => 0
            , iscurrent => 1
            },
            {
                order_by => ["jobset.project", "jobset.name", "job"],
                join => { "jobset" => "project" },
                rows => $c->stash->{limit} + 1
            } )
        ];

        # Perform build search in separate queries to prevent seq scan on buildoutputs table.
        $c->stash->{builds} = [ $c->model('DB::Builds')->search(
            { "buildoutputs.path" => { ilike => "%$query%" } },
            { order_by => ["id desc"], join => ["buildoutputs"]
            , rows => $c->stash->{limit}
            } ) ];

        $c->stash->{buildsdrv} = [ $c->model('DB::Builds')->search(
            { "drvpath" => { ilike => "%$query%" } },
            { order_by => ["id desc"]
            , rows => $c->stash->{limit}
            } ) ];

        $c->stash->{resource} = { projects => $c->stash->{projects},
                                  jobsets => $c->stash->{jobsets},
                                  builds => $c->stash->{builds},
                                  buildsdrv => $c->stash->{buildsdrv} };
    });
}

sub serveLogFile {
    my ($c, $logPath, $tail) = @_;
    $c->stash->{logPath} = $logPath;
    $c->stash->{tail} = $tail;
    $c->forward('Hydra::View::NixLog');
}

sub log :Local :Args(1) {
    my ($self, $c, $drvPath) = @_;

    $drvPath = "/nix/store/$drvPath";

    my $tail = $c->request->params->{"tail"};

    die if defined $tail && $tail !~ /^[0-9]+$/;

    my $logFile = findLog($c, $drvPath);

    if (defined $logFile) {
        serveLogFile($c, $logFile, $tail);
        return;
    }

    my $logPrefix = $c->config->{log_prefix};

    if (defined $logPrefix) {
        $c->res->redirect($logPrefix . "log/" . WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath)));
    } else {
        notFound($c, "The build log of $drvPath is not available.");
    }
}

sub runcommandlog :Local :Args(1) {
    my ($self, $c, $uuid) = @_;

    my $tail = $c->request->params->{"tail"};

    die if defined $tail && $tail !~ /^[0-9]+$/;

    my $runlog = $c->model('DB')->resultset('RunCommandLogs')->find({ uuid => $uuid })
        or notFound($c, "The RunCommand log is not available.");

    my $logFile = constructRunCommandLogPath($runlog);
    if (-f $logFile) {
        serveLogFile($c, $logFile, $tail);
        return;
    } else {
        notFound($c, "The RunCommand log is not available.");
    }
}

1;
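For orientation, a hedged client-side sketch of the binary-cache endpoints above (`/nix-cache-info` and `/<hash>.narinfo`); the host and the 32-character hash are placeholders, not values from this diff:

    use strict;
    use warnings;
    use LWP::UserAgent;

    my $ua   = LWP::UserAgent->new;
    my $base = 'https://hydra.example.com';

    # nix_cache_info: StoreDir/WantMassQuery/Priority, served with cache headers.
    my $info = $ua->get("$base/nix-cache-info");
    print $info->decoded_content if $info->is_success;

    # narinfo: the argument is the hash part of a store path
    # (/nix/store/<hash>-<name>); this one is made up and would hit the
    # 404 "does not exist" branch.
    my $hash    = 'abcdefghijklmnopqrstuvwxyz012345';
    my $narinfo = $ua->get("$base/$hash.narinfo");
    print $narinfo->is_success ? $narinfo->decoded_content : "not found\n";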
src/lib/Hydra/Controller/User.pm
@@ -1,487 +0,0 @@
package Hydra::Controller::User;

use utf8;
use strict;
use warnings;
use base 'Hydra::Base::Controller::REST';
use File::Slurper qw(read_text);
use Crypt::RandPasswd;
use Digest::SHA1 qw(sha1_hex);
use Hydra::Config qw(getLDAPConfigAmbient);
use Hydra::Helper::Nix;
use Hydra::Helper::CatalystUtils;
use Hydra::Helper::Email;
use LWP::UserAgent;
use JSON::MaybeXS;
use HTML::Entities;
use Encode qw(decode);


__PACKAGE__->config->{namespace} = '';


sub login :Local :Args(0) :ActionClass('REST') { }

sub login_POST {
    my ($self, $c) = @_;

    my $username = $c->stash->{params}->{username} // "";
    my $password = $c->stash->{params}->{password} // "";

    badRequest($c, "You must specify a user name.") if $username eq "";
    badRequest($c, "You must specify a password.") if $password eq "";

    if ($c->get_auth_realm('ldap') && $c->authenticate({username => $username, password => $password}, 'ldap')) {
        doLDAPLogin($self, $c, $username);
    } elsif ($c->authenticate({username => $username, password => $password})) {}
    else {
        accessDenied($c, "Bad username or password.")
    }

    $self->status_found(
        $c,
        location => $c->uri_for("current-user"),
        entity => $c->model("DB::Users")->find($c->user->username)
    );
}


sub logout :Local :Args(0) :ActionClass('REST') { }

sub logout_POST {
    my ($self, $c) = @_;
    $c->flash->{flashMsg} = "You are no longer signed in." if $c->user_exists();
    $c->logout;
    $self->status_no_content($c);
}

sub doLDAPLogin {
    my ($self, $c, $username) = @_;
    my $user = $c->find_user({ username => $username });
    my $LDAPUser = $c->find_user({ username => $username }, 'ldap');
    my @LDAPRoles = $LDAPUser->roles;
    my $role_mapping = getLDAPConfigAmbient()->{"role_mapping"};

    if (!$user) {
        $c->model('DB::Users')->create(
            { username => $username
            , fullname => decode('UTF-8', $LDAPUser->cn)
            , password => "!"
            , emailaddress => $LDAPUser->mail
            , type => "LDAP"
            });
        $user = $c->find_user({ username => $username }) or die;
    } else {
        $user->update(
            { fullname => decode('UTF-8', $LDAPUser->cn)
            , password => "!"
            , emailaddress => $LDAPUser->mail
            , type => "LDAP"
            });
    }
    $user->userroles->delete;
    foreach my $ldap_role (@LDAPRoles) {
        if (defined($role_mapping->{$ldap_role})) {
            my $roles = $role_mapping->{$ldap_role};
            for my $mapped_role (@$roles) {
                $user->userroles->create({ role => $mapped_role });
            }
        }
    }
    $c->set_authenticated($user);
}

sub doEmailLogin {
    my ($self, $c, $type, $email, $fullName) = @_;

    die "No email address provided.\n" unless defined $email;

    # Be paranoid about the email address format, since we do use it
    # in URLs.
    die "Illegal email address.\n" unless $email =~ /^[a-zA-Z0-9\.\-\_]+@[a-zA-Z0-9\.\-\_]+$/;

    # If allowed_domains is set, check if the email address
    # returned is on these domains. When not configured, allow all
    # domains.
    my $allowed_domains = $c->config->{allowed_domains} // ($c->config->{persona_allowed_domains} // "");
    if ($allowed_domains ne "") {
        my $email_ok = 0;
        my @domains = split ',', $allowed_domains;
        map { $_ =~ s/^\s*(.*?)\s*$/$1/ } @domains;

        foreach my $domain (@domains) {
            $email_ok = $email_ok || ((split '@', $email)[1] eq $domain);
        }
        error($c, "Your email address does not belong to a domain that is allowed to log in.\n")
            unless $email_ok;
    }

    my $user = $c->find_user({ username => $email });

    if ($user) {
        # Automatically upgrade legacy Persona accounts to Google accounts.
        if ($user->type eq "persona" && $type eq "google") {
            $user->update({type => "google"});
        }

        die "You cannot login via login type '$type'.\n" if $user->type ne $type;
    } else {
        $c->model('DB::Users')->create(
            { username => $email
            , fullname => $fullName,
            , password => "!"
            , emailaddress => $email,
            , type => $type
            });
        $user = $c->find_user({ username => $email }) or die;
    }

    $c->set_authenticated($user);

    $self->status_no_content($c);
    $c->flash->{successMsg} = "You are now signed in as <tt>" . encode_entities($email) . "</tt>.";
}


sub google_login :Path('/google-login') Args(0) {
    my ($self, $c) = @_;
    requirePost($c);

    error($c, "Logging in via Google is not enabled.") unless $c->config->{enable_google_login};

    my $ua = LWP::UserAgent->new();
    my $response = $ua->post(
        'https://www.googleapis.com/oauth2/v3/tokeninfo',
        { id_token => ($c->stash->{params}->{id_token} // die "No token."),
        });
    error($c, "Did not get a response from Google.") unless $response->is_success;

    my $data = decode_json($response->decoded_content) or die;

    die unless $data->{aud} eq $c->config->{google_client_id};
    die "Email address is not verified" unless $data->{email_verified};
    # FIXME: verify hosted domain claim?

    doEmailLogin($self, $c, "google", $data->{email}, $data->{name} // undef);
}

sub github_login :Path('/github-login') Args(0) {
    my ($self, $c) = @_;

    my $client_id = $c->config->{github_client_id} or die "github_client_id not configured.";
    my $client_secret = $c->config->{github_client_secret} // do {
        my $client_secret_file = $c->config->{github_client_secret_file} or die "github_client_secret nor github_client_secret_file is configured.";
        my $client_secret = read_text($client_secret_file);
        $client_secret =~ s/\s+//;
        $client_secret;
    };
    die "No github secret configured" unless $client_secret;

    my $ua = LWP::UserAgent->new();
    my $response = $ua->post(
        'https://github.com/login/oauth/access_token',
        {
            client_id => $client_id,
            client_secret => $client_secret,
            code => ($c->req->params->{code} // die "No token."),
        }, Accept => 'application/json');
    error($c, "Did not get a response from GitHub.") unless $response->is_success;

    my $data = decode_json($response->decoded_content) or die;
    my $access_token = $data->{access_token} // die "No access_token in response from GitHub.";

    $response = $ua->get('https://api.github.com/user/emails', Accept => 'application/vnd.github.v3+json', Authorization => "token $access_token");
    error($c, "Did not get a response from GitHub for email info.") unless $response->is_success;

    $data = decode_json($response->decoded_content) or die;
    my $email;

    foreach my $eml (@{$data}) {
        $email = $eml->{email} if $eml->{verified} && $eml->{primary};
    }

    die "No primary email for this GitHub profile" unless $email;

    $response = $ua->get('https://api.github.com/user', Authorization => "token $access_token");
    error($c, "Did not get a response from GitHub for user info.") unless $response->is_success;
    $data = decode_json($response->decoded_content) or die;

    doEmailLogin($self, $c, "github", $email, $data->{name} // undef);

    $c->res->redirect($c->uri_for($c->res->cookies->{'after_github'}));
}

sub github_redirect :Path('/github-redirect') Args(0) {
    my ($self, $c) = @_;

    my $client_id = $c->config->{github_client_id} or die "github_client_id not configured.";

    my $after = "/" . $c->req->params->{after};

    $c->res->cookies->{'after_github'} = {
        name => 'after_github',
        value => $after,
    };

    $c->res->redirect("https://github.com/login/oauth/authorize?client_id=$client_id&scope=user:email");
}


sub captcha :Local Args(0) {
    my ($self, $c) = @_;
    $c->create_captcha();
}


sub isValidPassword {
    my ($password) = @_;
    return length($password) >= 6;
}


sub register :Local Args(0) {
    my ($self, $c) = @_;

    accessDenied($c, "User registration is currently not implemented.") unless isAdmin($c);

    if ($c->request->method eq "GET") {
        $c->stash->{template} = 'user.tt';
        $c->stash->{create} = 1;
        return;
    }

    die unless $c->request->method eq "PUT";

    my $userName = trim $c->stash->{params}->{username};
    $c->stash->{username} = $userName;

    error($c, "You did not enter the correct digits from the security image.")
        unless isAdmin($c) || $c->validate_captcha($c->req->param('captcha'));

    error($c, "Your user name is invalid. It must start with a lower-case letter followed by lower-case letters, digits, dots or underscores.")
        if $userName !~ /^$userNameRE$/;

    error($c, "Your user name is already taken.")
        if $c->find_user({ username => $userName });

    $c->model('DB')->schema->txn_do(sub {
        my $user = $c->model('DB::Users')->create(
            { username => $userName
            , password => "!"
            , emailaddress => "",
            , type => "hydra"
            });
        updatePreferences($c, $user);
    });

    unless ($c->user_exists) {
        $c->set_authenticated({username => $userName})
            or error($c, "Unable to authenticate the new user!");
    }

    $c->flash->{successMsg} = "User <tt>$userName</tt> has been created.";
    $self->status_no_content($c);
}
sub updatePreferences {
    my ($c, $user) = @_;

    my $fullName = trim($c->stash->{params}->{fullname} // "");
    error($c, "You must specify your full name.") if $fullName eq "";

    my $password = trim($c->stash->{params}->{password} // "");
    if ($user->type eq "hydra" && ($user->password eq "!" || $password ne "")) {
        error($c, "You must specify a password of at least 6 characters.")
            unless isValidPassword($password);

        error($c, "The passwords you specified did not match.")
            if $password ne trim $c->stash->{params}->{password2};

        $user->setPassword($password);
    }

    my $emailAddress = trim($c->stash->{params}->{emailaddress} // "");
    # FIXME: validate email address?

    $user->update(
        { fullname => $fullName
        , emailonerror => $c->stash->{params}->{"emailonerror"} ? 1 : 0
        , publicdashboard => $c->stash->{params}->{"publicdashboard"} ? 1 : 0
        });

    if (isAdmin($c)) {
        $user->update({ emailaddress => $emailAddress })
            if $user->type eq "hydra";

        $user->userroles->delete;
        $user->userroles->create({ role => $_ })
            foreach paramToList($c, "roles");
    }
}
sub currentUser :Path('/current-user') :ActionClass('REST') { }

sub currentUser_GET {
    my ($self, $c) = @_;

    requireUser($c);

    $self->status_ok($c,
        entity => $c->model("DB::Users")->find($c->user->username)
    );
}


sub user :Chained('/') PathPart('user') CaptureArgs(1) {
    my ($self, $c, $userName) = @_;

    requireUser($c);

    accessDenied($c, "You do not have permission to edit other users.")
        if $userName ne $c->user->username && !isAdmin($c);

    $c->stash->{user} = $c->model('DB::Users')->find($userName)
        or notFound($c, "User $userName doesn't exist.");
}


sub edit :Chained('user') :PathPart('') :Args(0) :ActionClass('REST::ForBrowsers') { }

sub edit_GET {
    my ($self, $c) = @_;
    $c->stash->{template} = 'user.tt';
}

sub edit_PUT {
    my ($self, $c) = @_;
    my $user = $c->stash->{user};

    if (($c->stash->{params}->{submit} // "") eq "reset-password") {
        return;
    }

    $c->model('Db')->schema->txn_do(sub {
        updatePreferences($c, $user);
    });

    $c->flash->{successMsg} = "Your preferences have been updated.";
    $self->status_no_content($c);
}

sub edit_DELETE {
    my ($self, $c) = @_;
    my $user = $c->stash->{user};

    my ($project) = $c->model('DB::Projects')->search({ owner => $user->username });
    error($c, "User " . $user->username . " is still owner of project " . $project->name . ".")
        if defined $project;

    $c->logout() if $user->username eq $c->user->username;

    $user->delete;

    $c->flash->{successMsg} = "The user has been deleted.";
    $self->status_no_content($c);
}


sub reset_password :Chained('user') :PathPart('reset-password') :Args(0) {
    my ($self, $c) = @_;
    my $user = $c->stash->{user};

    requirePost($c);

    error($c, "This user's password cannot be reset.") if $user->type ne "hydra";
    error($c, "No email address is set for this user.")
        unless $user->emailaddress;

    my $password = Crypt::RandPasswd->word(8,10);
    $user->setPassword($password);
    sendEmail(
        $c->config,
        $user->emailaddress,
        "Hydra password reset",
        "Hi,\n\n".
        "Your password has been reset. Your new password is '$password'.\n\n".
        "You can change your password at " . $c->uri_for($self->action_for('edit'), [$user->username]) . ".\n\n".
        "With regards,\n\nHydra.\n",
        []
    );

    $c->flash->{successMsg} = "A new password has been sent to ${\$user->emailaddress}.";
    $self->status_no_content($c);
}


sub dashboard_old :Chained('user') :PathPart('dashboard') :Args(0) {
    my ($self, $c) = @_;
    $c->res->redirect($c->uri_for($self->action_for("dashboard"), $c->req->captures));
}


sub dashboard_base :Chained('/') PathPart('dashboard') CaptureArgs(1) {
    my ($self, $c, $userName) = @_;

    $c->stash->{user} = $c->model('DB::Users')->find($userName)
        or notFound($c, "User $userName doesn't exist.");

    accessDenied($c, "You do not have permission to view this dashboard.")
        unless $c->stash->{user}->publicdashboard ||
            (defined $c->user && ($userName eq $c->user->username || !isAdmin($c)));
}


sub dashboard :Chained('dashboard_base') :PathPart('') :Args(0) {
    my ($self, $c) = @_;
    $c->stash->{template} = 'dashboard.tt';

    # Get the N most recent builds for each starred job.
    $c->stash->{starredJobs} = [];
    foreach my $j ($c->stash->{user}->starredjobs->search({}, { order_by => ['project', 'jobset', 'job'] })) {
        my @builds = $j->jobset->builds->search(
            { job => $j->job },
            { rows => 20, order_by => "id desc" });
        push @{$c->stash->{starredJobs}}, { job => $j, builds => [@builds] };
    }
}


sub my_jobs_tab :Chained('dashboard_base') :PathPart('my-jobs-tab') :Args(0) {
    my ($self, $c) = @_;
    $c->stash->{lazy} = 1;
    $c->stash->{template} = 'dashboard-my-jobs-tab.tt';

    error($c, "No email address is set for this user.") unless $c->stash->{user}->emailaddress;

    # Get all current builds of which this user is a maintainer.
    $c->stash->{builds} = [$c->model('DB::Builds')->search(
        { iscurrent => 1
        , maintainers => { ilike => "%" . $c->stash->{user}->emailaddress . "%" }
        , "project.enabled" => 1
        , "jobset.enabled" => 1
        },
        { order_by => ["project", "jobset", "job"]
        , join => {"jobset" => "project"}
        })];
}


sub my_jobsets_tab :Chained('dashboard_base') :PathPart('my-jobsets-tab') :Args(0) {
    my ($self, $c) = @_;
    $c->stash->{template} = 'dashboard-my-jobsets-tab.tt';

    my $jobsets = $c->model('DB::Jobsets')->search(
        { "project.enabled" => 1, "me.enabled" => 1,
        , owner => $c->stash->{user}->username
        },
        { order_by => ["project", "name"]
        , join => ["project"]
        });

    $c->stash->{jobsets} = [jobsetOverview_($c, $jobsets)];
}


1;
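`doLDAPLogin` above grants roles via the `role_mapping` table from the LDAP config; a minimal sketch of that lookup, with illustrative group and role names (not taken from this diff):

    use strict;
    use warnings;

    # Each LDAP group may map to zero or more Hydra roles; unmapped groups
    # are silently ignored, mirroring the loop in doLDAPLogin().
    my $role_mapping = {
        'hydra-admins'     => [ 'admin' ],
        'hydra-restarters' => [ 'restart-jobs', 'cancel-build' ],
    };

    my @ldap_roles = ('hydra-restarters', 'some-unmapped-group');
    my @granted;
    for my $ldap_role (@ldap_roles) {
        push @granted, @{ $role_mapping->{$ldap_role} }
            if defined $role_mapping->{$ldap_role};
    }
    print "granted: @granted\n";    # granted: restart-jobs cancel-build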
Some files were not shown because too many files have changed in this diff.