Compare commits
1 commit: nix-2.20...lazy-trees
SHA1: 0118770092

.github/workflows/test.yml (vendored), 2 changes
@@ -4,7 +4,7 @@ on:
  push:
jobs:
  tests:
    runs-on: ubuntu-latest
    runs-on: ubuntu-18.04
    steps:
    - uses: actions/checkout@v3
      with:
.gitignore (vendored), 1 change
@@ -38,7 +38,6 @@ t/jobs/declarative/project.json
hydra-config.h
hydra-config.h.in
result
result-*
outputs
config
stamp-h1
Makefile.am, 12 changes
@@ -1,12 +1,8 @@
SUBDIRS = src doc
if CAN_DO_CHECK
SUBDIRS += t
endif

SUBDIRS = src t doc
BOOTCLEAN_SUBDIRS = $(SUBDIRS)
DIST_SUBDIRS = $(SUBDIRS)
EXTRA_DIST = nixos-modules/hydra.nix
EXTRA_DIST = hydra-module.nix

install-data-local: nixos-modules/hydra.nix
install-data-local: hydra-module.nix
	$(INSTALL) -d $(DESTDIR)$(datadir)/nix
	$(INSTALL_DATA) nixos-modules/hydra.nix $(DESTDIR)$(datadir)/nix/hydra-module.nix
	$(INSTALL_DATA) hydra-module.nix $(DESTDIR)$(datadir)/nix/
@@ -80,7 +80,7 @@ $ nix-build

You can use the provided shell.nix to get a working development environment:

```
$ nix-shell
$ autoreconfPhase
$ ./bootstrap
$ configurePhase # NOTE: not ./configure
$ make
```
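For reference, a shell.nix like the one mentioned above is often a thin flake-compat wrapper around the flake's devShell. The following is a minimal sketch of that common pattern, not necessarily this repository's actual file:

```nix
# Hypothetical sketch of a flake-compat style shell.nix; the actual
# file in the repository may differ.
(import
  (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz")
  { src = ./.; }
).shellNix
```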
configure.ac, 22 changes
@@ -10,6 +10,8 @@ AC_PROG_LN_S
AC_PROG_LIBTOOL
AC_PROG_CXX

CXXFLAGS+=" -std=c++17"

AC_PATH_PROG([XSLTPROC], [xsltproc])

AC_ARG_WITH([docbook-xsl],

@@ -53,6 +55,9 @@ PKG_CHECK_MODULES([NIX], [nix-main nix-expr nix-store])
testPath="$(dirname $(type -p expr))"
AC_SUBST(testPath)

jobsPath="$(realpath ./t/jobs)"
AC_SUBST(jobsPath)

CXXFLAGS+=" -include nix/config.h"

AC_CONFIG_FILES([

@@ -68,22 +73,11 @@ AC_CONFIG_FILES([
src/lib/Makefile
src/root/Makefile
src/script/Makefile
t/Makefile
t/jobs/config.nix
t/jobs/declarative/project.json
])

# Tests might be filtered out
AM_CONDITIONAL([CAN_DO_CHECK], [test -f "$srcdir/t/api-test.t"])
AM_COND_IF(
[CAN_DO_CHECK],
[
jobsPath="$(realpath ./t/jobs)"
AC_SUBST(jobsPath)
AC_CONFIG_FILES([
t/Makefile
t/jobs/config.nix
t/jobs/declarative/project.json
])
])

AC_CONFIG_COMMANDS([executable-scripts], [])

AC_CONFIG_HEADER([hydra-config.h])
@@ -74,30 +74,6 @@ following:
}
}

Populating a Cache
------------------

A common use for Hydra is to pre-build and cache derivations which
take a long time to build. While it is possible to directly access the
Hydra server's store over SSH, a more scalable option is to upload
built derivations to a remote store like an [S3-compatible object
store](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html#s3-binary-cache-store). Setting
the `store_uri` parameter will cause Hydra to sign and upload
derivations as they are built:

```
store_uri = s3://cache-bucket-name?compression=zstd&parallel-compression=true&write-nar-listing=1&ls-compression=br&log-compression=br&secret-key=/path/to/cache/private/key
```

This example uses [Zstandard](https://github.com/facebook/zstd)
compression on derivations to reduce CPU usage on the server, but
[Brotli](https://brotli.org/) compression for derivation listings and
build logs because it has better browser support.

See [`nix help stores`](https://nixos.org/manual/nix/stable/command-ref/new-cli/nix3-help-stores.html)
for a description of the store URI format.
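On NixOS, the `services.hydra-dev.extraConfig` option that appears later in this compare is a natural place to put that `store_uri` setting. The snippet below is a minimal sketch, not taken from the diff; the bucket name and key path are placeholders:

```nix
{
  # Hypothetical NixOS fragment: pass the store_uri setting through
  # to hydra.conf via the module's extraConfig option.
  services.hydra-dev.extraConfig = ''
    store_uri = s3://example-cache-bucket?compression=zstd&secret-key=/var/lib/hydra/cache-key
  '';
}
```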
Statsd Configuration
--------------------

@@ -155,8 +131,8 @@ use LDAP to manage roles and users.
This is configured by defining the `<ldap>` block in the configuration file.
In this block it's possible to configure the authentication plugin in the
`<config>` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`.
The documentation for the available settings can be found
[here](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
The documentation for the available settings can be found [here]
(https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).

Note that the bind password (if needed) should be supplied as an included file to
prevent it from leaking to the Nix store.
@@ -203,7 +179,6 @@ Example configuration:
<role_search_options>
deref = always
</role_search_options>
</store>
</config>
<role_mapping>
# Make all users in the hydra_admin group Hydra admins
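To make the shape of that fragment easier to see in context, here is a skeletal `<ldap>` block assembled from the pieces above. The host, the `ldap_server` option, and the `hydra_admin = admin` mapping are illustrative assumptions; consult the Catalyst::Authentication::Store::LDAP documentation linked earlier for the full option list:

```nix
{
  # Hypothetical sketch of an LDAP block, wrapped in the NixOS
  # module's extraConfig; values are placeholders.
  services.hydra-dev.extraConfig = ''
    <ldap>
      <config>
        <store>
          ldap_server = ldap.example.org
          <role_search_options>
            deref = always
          </role_search_options>
        </store>
      </config>
      <role_mapping>
        # Make all users in the hydra_admin group Hydra admins
        hydra_admin = admin
      </role_mapping>
    </ldap>
  '';
}
```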
@@ -18,7 +18,7 @@ $ nix-shell

To build Hydra, you should then do:

```console
[nix-shell]$ autoreconfPhase
[nix-shell]$ ./bootstrap
[nix-shell]$ configurePhase
[nix-shell]$ make
```
@@ -404,10 +404,3 @@ analogous:
| `String value` | `gitea_status_repo` | *Name of the `Git checkout` input* |
| `String value` | `gitea_http_url` | *Public URL of `gitea`*, optional |

Content-addressed derivations
-----------------------------

Hydra can, to a certain extent, use the [`ca-derivations` experimental Nix feature](https://github.com/NixOS/rfcs/pull/62).
To use it, make sure that the Nix version you use is at least as recent as the one used in hydra's flake.

Be warned that this support is still highly experimental, and anything beyond the basic functionality might be broken.
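For experimenting with this, the feature also has to be enabled on the Nix side. A minimal sketch, assuming the standard `experimental-features` setting (which is not itself part of this diff):

```nix
{
  # Hypothetical sketch: enable the ca-derivations experimental
  # feature for the Nix daemon used by Hydra.
  nix.extraOptions = ''
    experimental-features = nix-command flakes ca-derivations
  '';
}
```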
flake.lock (generated), 81 changes
@@ -1,89 +1,54 @@
{
  "nodes": {
    "flake-compat": {
    "lowdown-src": {
      "flake": false,
      "locked": {
        "lastModified": 1673956053,
        "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
        "owner": "edolstra",
        "repo": "flake-compat",
        "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
        "lastModified": 1633514407,
        "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
        "owner": "kristapsdz",
        "repo": "lowdown",
        "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
        "type": "github"
      },
      "original": {
        "owner": "edolstra",
        "repo": "flake-compat",
        "type": "github"
      }
    },
    "libgit2": {
      "flake": false,
      "locked": {
        "lastModified": 1697646580,
        "narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=",
        "owner": "libgit2",
        "repo": "libgit2",
        "rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5",
        "type": "github"
      },
      "original": {
        "owner": "libgit2",
        "repo": "libgit2",
        "owner": "kristapsdz",
        "repo": "lowdown",
        "type": "github"
      }
    },
    "nix": {
      "inputs": {
        "flake-compat": "flake-compat",
        "libgit2": "libgit2",
        "nixpkgs": [
          "nixpkgs"
        ],
        "lowdown-src": "lowdown-src",
        "nixpkgs": "nixpkgs",
        "nixpkgs-regression": "nixpkgs-regression"
      },
      "locked": {
        "lastModified": 1715805674,
        "narHash": "sha256-0CIBMECsA3ISJZrJcOTzi6wa3QENTKGLtOpYIOoxwxo=",
        "owner": "NixOS",
        "lastModified": 1668607642,
        "narHash": "sha256-lNnk5thRq43XPcA+5KwoHgdsKf3urmE4B2xzHokVMbc=",
        "owner": "edolstra",
        "repo": "nix",
        "rev": "ab48ea416a203e9ccefb70aa634e27477e4c1ac4",
        "rev": "561440bd6ddebd53d7b42bced22cb78fd607a6de",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "2.20-maintenance",
        "owner": "edolstra",
        "ref": "lazy-trees",
        "repo": "nix",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1705033721,
        "narHash": "sha256-K5eJHmL1/kev6WuqyqqbS1cdNnSidIZ3jeqJ7GbrYnQ=",
        "lastModified": 1657693803,
        "narHash": "sha256-G++2CJ9u0E7NNTAi9n5G8TdDmGJXcIjkJ3NF8cetQB8=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "a1982c92d8980a0114372973cbdfe0a307f1bdea",
        "rev": "365e1b3a859281cf11b94f87231adeabbdd878a2",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-23.05-small",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs-for-fileset": {
      "locked": {
        "lastModified": 1706098335,
        "narHash": "sha256-r3dWjT8P9/Ah5m5ul4WqIWD8muj5F+/gbCdjiNVBKmU=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "a77ab169a83a4175169d78684ddd2e54486ac651",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-23.11",
        "ref": "nixos-22.05-small",
        "repo": "nixpkgs",
        "type": "github"
      }
    },

@@ -107,8 +72,10 @@
    "root": {
      "inputs": {
        "nix": "nix",
        "nixpkgs": "nixpkgs",
        "nixpkgs-for-fileset": "nixpkgs-for-fileset"
        "nixpkgs": [
          "nix",
          "nixpkgs"
        ]
      }
    }
  }
},
flake.nix, 291 changes
@@ -1,25 +1,19 @@
{
  description = "A Nix-based continuous build system";

  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.05-small";
  inputs.nix.url = "github:NixOS/nix/2.20-maintenance";
  inputs.nix.inputs.nixpkgs.follows = "nixpkgs";
  inputs.nixpkgs.follows = "nix/nixpkgs";
  inputs.nix.url = "github:edolstra/nix/lazy-trees";

  # TODO get rid of this once https://github.com/NixOS/nix/pull/9546 is
  # merged and we upgrade our Nix, so the main `nixpkgs` input is at least
  # 23.11 and has `lib.fileset`.
  inputs.nixpkgs-for-fileset.url = "github:NixOS/nixpkgs/nixos-23.11";

  outputs = { self, nixpkgs, nix, nixpkgs-for-fileset }:
  outputs = { self, nixpkgs, nix }:
    let
      version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (self.lastModifiedDate or "19700101")}.${self.shortRev or "DIRTY"}";

      systems = [ "x86_64-linux" "aarch64-linux" ];
      forEachSystem = nixpkgs.lib.genAttrs systems;

      overlayList = [ self.overlays.default nix.overlays.default ];

      pkgsBySystem = forEachSystem (system: import nixpkgs {
        inherit system;
        overlays = overlayList;
        overlays = [ self.overlays.default nix.overlays.default ];
      });

      # NixOS configuration used for VM tests.
@@ -66,9 +60,197 @@

};

hydra = final.callPackage ./package.nix {
  inherit (nixpkgs-for-fileset.lib) fileset;
  rawSrc = self;
hydra = with final; let
  perlDeps = buildEnv {
    name = "hydra-perl-deps";
    paths = with perlPackages; lib.closePropagation
      [
        AuthenSASL
        CatalystActionREST
        CatalystAuthenticationStoreDBIxClass
        CatalystAuthenticationStoreLDAP
        CatalystDevel
        CatalystPluginAccessLog
        CatalystPluginAuthorizationRoles
        CatalystPluginCaptcha
        CatalystPluginPrometheusTiny
        CatalystPluginSessionStateCookie
        CatalystPluginSessionStoreFastMmap
        CatalystPluginStackTrace
        CatalystTraitForRequestProxyBase
        CatalystViewDownload
        CatalystViewJSON
        CatalystViewTT
        CatalystXRoleApplicator
        CatalystXScriptServerStarman
        CryptPassphrase
        CryptPassphraseArgon2
        CryptRandPasswd
        DataDump
        DateTime
        DBDPg
        DBDSQLite
        DigestSHA1
        EmailMIME
        EmailSender
        FileLibMagic
        FileSlurper
        FileWhich
        final.nix.perl-bindings
        git
        IOCompress
        IPCRun
        IPCRun3
        JSON
        JSONMaybeXS
        JSONXS
        ListSomeUtils
        LWP
        LWPProtocolHttps
        ModulePluggable
        NetAmazonS3
        NetPrometheus
        NetStatsd
        PadWalker
        ParallelForkManager
        PerlCriticCommunity
        PrometheusTinyShared
        ReadonlyX
        SetScalar
        SQLSplitStatement
        Starman
        StringCompareConstantTime
        SysHostnameLong
        TermSizeAny
        TermReadKey
        Test2Harness
        TestPostgreSQL
        TextDiff
        TextTable
        UUID4Tiny
        YAML
        XMLSimple
      ];
  };
in
stdenv.mkDerivation {

  name = "hydra-${version}";

  src = self;

  buildInputs =
    [
      makeWrapper
      autoconf
      automake
      libtool
      unzip
      nukeReferences
      pkg-config
      libpqxx
      top-git
      mercurial
      darcs
      subversion
      breezy
      openssl
      bzip2
      libxslt
      final.nix
      perlDeps
      perl
      mdbook
      pixz
      boost
      postgresql_13
      (if lib.versionAtLeast lib.version "20.03pre"
       then nlohmann_json
       else nlohmann_json.override { multipleHeaders = true; })
      prometheus-cpp
    ];

  checkInputs = [
    cacert
    foreman
    glibcLocales
    libressl.nc
    openldap
    python3
  ];

  hydraPath = lib.makeBinPath (
    [
      subversion
      openssh
      final.nix
      coreutils
      findutils
      pixz
      gzip
      bzip2
      xz
      gnutar
      unzip
      git
      top-git
      mercurial
      darcs
      gnused
      breezy
    ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
  );

  OPENLDAP_ROOT = openldap;

  shellHook = ''
    pushd $(git rev-parse --show-toplevel) >/dev/null

    PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH
    PERL5LIB=$(pwd)/src/lib:$PERL5LIB
    export HYDRA_HOME="$(pwd)/src/"
    mkdir -p .hydra-data
    export HYDRA_DATA="$(pwd)/.hydra-data"
    export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'

    popd >/dev/null
  '';

  preConfigure = "autoreconf -vfi";

  NIX_LDFLAGS = [ "-lpthread" ];

  enableParallelBuilding = true;

  doCheck = true;

  preCheck = ''
    patchShebangs .
    export LOGNAME=''${LOGNAME:-foo}
    # set $HOME for bzr so it can create its trace file
    export HOME=$(mktemp -d)
  '';

  postInstall = ''
    mkdir -p $out/nix-support

    for i in $out/bin/*; do
        read -n 4 chars < $i
        if [[ $chars =~ ELF ]]; then continue; fi
        wrapProgram $i \
            --prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
            --prefix PATH ':' $out/bin:$hydraPath \
            --set HYDRA_RELEASE ${version} \
            --set HYDRA_HOME $out/libexec/hydra \
            --set NIX_RELEASE ${final.nix.name or "unknown"}
    done
  '';

  dontStrip = true;

  meta.description = "Build of Hydra on ${final.stdenv.system}";
  passthru = { inherit perlDeps; inherit (final) nix; };
};
@@ -76,15 +258,9 @@

build = forEachSystem (system: packages.${system}.hydra);

buildNoTests = forEachSystem (system:
  packages.${system}.hydra.overrideAttrs (_: {
    doCheck = false;
  })
);

manual = forEachSystem (system:
  let pkgs = pkgsBySystem.${system}; in
  pkgs.runCommand "hydra-manual-${pkgs.hydra.version}" { }
  pkgs.runCommand "hydra-manual-${version}" { }
    ''
      mkdir -p $out/share
      cp -prvd ${pkgs.hydra}/share/doc $out/share/

@@ -96,7 +272,6 @@
tests.install = forEachSystem (system:
  with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
  simpleTest {
    name = "hydra-install";
    nodes.machine = hydraServer;
    testScript =
      ''

@@ -104,7 +279,7 @@
machine.wait_for_job("hydra-server")
machine.wait_for_job("hydra-evaluator")
machine.wait_for_job("hydra-queue-runner")
machine.wait_for_open_port(3000)
machine.wait_for_open_port("3000")
machine.succeed("curl --fail http://localhost:3000/")
'';
});

@@ -113,7 +288,6 @@
let pkgs = pkgsBySystem.${system}; in
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
simpleTest {
  name = "hydra-notifications";
  nodes.machine = { pkgs, ... }: {
    imports = [ hydraServer ];
    services.hydra-dev.extraConfig = ''

@@ -141,7 +315,7 @@

# Wait until InfluxDB can receive web requests
machine.wait_for_job("influxdb")
machine.wait_for_open_port(8086)
machine.wait_for_open_port("8086")

# Create an InfluxDB database where hydra will write to
machine.succeed(

@@ -151,7 +325,7 @@

# Wait until hydra-server can receive HTTP requests
machine.wait_for_job("hydra-server")
machine.wait_for_open_port(3000)
machine.wait_for_open_port("3000")

# Setup the project and jobset
machine.succeed(

@@ -172,7 +346,6 @@
let pkgs = pkgsBySystem.${system}; in
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
makeTest {
  name = "hydra-gitea";
  nodes.machine = { pkgs, ... }: {
    imports = [ hydraServer ];
    services.hydra-dev.extraConfig = ''

@@ -180,9 +353,13 @@
      root=d7f16a3412e01a43a414535b16007c6931d3a9c7
      </gitea_authorization>
    '';
    nixpkgs.config.permittedInsecurePackages = [ "gitea-1.19.4" ];
    nix = {
      settings.substituters = [ ];
      distributedBuilds = true;
      buildMachines = [{
        hostName = "localhost";
        systems = [ system ];
      }];
      binaryCaches = [ ];
    };
    services.gitea = {
      enable = true;

@@ -198,7 +375,7 @@
testScript =
  let
    scripts.mktoken = pkgs.writeText "token.sql" ''
      INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight, scope) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7', 'all');
      INSERT INTO access_token (id, uid, name, created_unix, updated_unix, token_hash, token_salt, token_last_eight) VALUES (1, 1, 'hydra', 1617107360, 1617107360, 'a930f319ca362d7b49a4040ac0af74521c3a3c3303a86f327b01994430672d33b6ec53e4ea774253208686c712495e12a486', 'XRjWE9YW0g', '31d3a9c7');
    '';

    scripts.git-setup = pkgs.writeShellScript "setup.sh" ''

@@ -353,9 +530,9 @@

response = json.loads(data)

assert len(response) == 2, "Expected exactly three status updates for latest commit (queued, finished)!"
assert response[0]['status'] == "success", "Expected finished status to be success!"
assert response[1]['status'] == "pending", "Expected queued status to be pending!"
assert len(response) == 2, "Expected exactly two status updates for latest commit!"
assert response[0]['status'] == "success", "Expected latest status to be success!"
assert response[1]['status'] == "pending", "Expected first status to be pending!"

machine.shutdown()
'';

@@ -384,8 +561,50 @@
  default = pkgsBySystem.${system}.hydra;
});

nixosModules = import ./nixos-modules {
  overlays = overlayList;
nixosModules.hydra = {
  imports = [ ./hydra-module.nix ];
  nixpkgs.overlays = [ self.overlays.default nix.overlays.default ];
};

nixosModules.hydraTest = { pkgs, ... }: {
  imports = [ self.nixosModules.hydra ];

  services.hydra-dev.enable = true;
  services.hydra-dev.hydraURL = "http://hydra.example.org";
  services.hydra-dev.notificationSender = "admin@hydra.example.org";

  systemd.services.hydra-send-stats.enable = false;

  services.postgresql.enable = true;
  services.postgresql.package = pkgs.postgresql_11;

  # The following is to work around the following error from hydra-server:
  #   [error] Caught exception in engine "Cannot determine local time zone"
  time.timeZone = "UTC";

  nix.extraOptions = ''
    allowed-uris = https://github.com/
  '';
};

nixosModules.hydraProxy = {
  services.httpd = {
    enable = true;
    adminAddr = "hydra-admin@example.org";
    extraConfig = ''
      <Proxy *>
        Order deny,allow
        Allow from all
      </Proxy>

      ProxyRequests Off
      ProxyPreserveHost On
      ProxyPass /apache-errors !
      ErrorDocument 503 /apache-errors/503.html
      ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
      ProxyPassReverse / http://127.0.0.1:3000/
    '';
  };
};
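As a usage sketch, a downstream flake can consume the `nixosModules.hydra` module exported above. The input URL and hostname below are placeholders, not part of this compare:

```nix
{
  # Hypothetical downstream flake that imports the hydra module
  # exported above; "hydra" here refers to this repository's flake.
  inputs.hydra.url = "github:NixOS/hydra";

  outputs = { self, nixpkgs, hydra }: {
    nixosConfigurations.buildfarm = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        hydra.nixosModules.hydra
        { services.hydra-dev.enable = true; }
      ];
    };
  };
}
```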
nixosConfigurations.container = nixpkgs.lib.nixosSystem {
@@ -533,13 +533,13 @@ paths:
        schema:
          $ref: '#/components/schemas/Error'

  /eval/{eval-id}:
  /eval/{build-id}:
    get:
      summary: Retrieves evaluations identified by eval id
      summary: Retrieves evaluations identified by build id
      parameters:
      - name: eval-id
      - name: build-id
        in: path
        description: eval identifier
        description: build identifier
        required: true
        schema:
          type: integer

@@ -551,24 +551,6 @@ paths:
        schema:
          $ref: '#/components/schemas/JobsetEval'

  /eval/{eval-id}/builds:
    get:
      summary: Retrieves all builds belonging to an evaluation identified by eval id
      parameters:
      - name: eval-id
        in: path
        description: eval identifier
        required: true
        schema:
          type: integer
      responses:
        '200':
          description: builds
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/JobsetEvalBuilds'

components:
  schemas:

@@ -814,13 +796,6 @@ components:
      additionalProperties:
        $ref: '#/components/schemas/JobsetEvalInput'

    JobsetEvalBuilds:
      type: array
      items:
        type: object
        additionalProperties:
          $ref: '#/components/schemas/Build'

    JobsetOverview:
      type: array
      items:

@@ -895,7 +870,7 @@ components:
        description: Size of the produced file
        type: integer
      defaultpath:
        description: if path is a directory, the default file relative to path to be served
        description: This is a Git/Mercurial commit hash or a Subversion revision number
        type: string
      'type':
        description: Types of build product (user defined)
@@ -340,7 +340,7 @@ in
systemd.services.hydra-queue-runner =
  { wantedBy = [ "multi-user.target" ];
    requires = [ "hydra-init.service" ];
    after = [ "hydra-init.service" "network.target" "network-online.target" ];
    after = [ "hydra-init.service" "network.target" ];
    path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
    restartTriggers = [ hydraConf ];
    environment = env // {
@@ -1,49 +0,0 @@
{ overlays }:

rec {
  hydra = {
    imports = [ ./hydra.nix ];
    nixpkgs = { inherit overlays; };
  };

  hydraTest = { pkgs, ... }: {
    imports = [ hydra ];

    services.hydra-dev.enable = true;
    services.hydra-dev.hydraURL = "http://hydra.example.org";
    services.hydra-dev.notificationSender = "admin@hydra.example.org";

    systemd.services.hydra-send-stats.enable = false;

    services.postgresql.enable = true;
    services.postgresql.package = pkgs.postgresql_11;

    # The following is to work around the following error from hydra-server:
    #   [error] Caught exception in engine "Cannot determine local time zone"
    time.timeZone = "UTC";

    nix.extraOptions = ''
      allowed-uris = https://github.com/
    '';
  };

  hydraProxy = {
    services.httpd = {
      enable = true;
      adminAddr = "hydra-admin@example.org";
      extraConfig = ''
        <Proxy *>
          Order deny,allow
          Allow from all
        </Proxy>

        ProxyRequests Off
        ProxyPreserveHost On
        ProxyPass /apache-errors !
        ErrorDocument 503 /apache-errors/503.html
        ProxyPass / http://127.0.0.1:3000/ retry=5 disablereuse=on
        ProxyPassReverse / http://127.0.0.1:3000/
      '';
    };
  };
}
package.nix, 272 changes
@@ -1,272 +0,0 @@
{ stdenv
, lib
, fileset

, rawSrc

, buildEnv

, perlPackages

, nix
, git

, makeWrapper
, autoreconfHook
, nukeReferences
, pkg-config
, mdbook

, unzip
, libpqxx
, top-git
, mercurial
, darcs
, subversion
, breezy
, openssl
, bzip2
, libxslt
, perl
, pixz
, boost
, postgresql_13
, nlohmann_json
, prometheus-cpp

, cacert
, foreman
, glibcLocales
, libressl
, openldap
, python3

, openssh
, coreutils
, findutils
, gzip
, xz
, gnutar
, gnused

, rpm
, dpkg
, cdrkit
}:

let
  perlDeps = buildEnv {
    name = "hydra-perl-deps";
    paths = lib.closePropagation
      ([
        nix.perl-bindings
        git
      ] ++ (with perlPackages; [
        AuthenSASL
        CatalystActionREST
        CatalystAuthenticationStoreDBIxClass
        CatalystAuthenticationStoreLDAP
        CatalystDevel
        CatalystPluginAccessLog
        CatalystPluginAuthorizationRoles
        CatalystPluginCaptcha
        CatalystPluginPrometheusTiny
        CatalystPluginSessionStateCookie
        CatalystPluginSessionStoreFastMmap
        CatalystPluginStackTrace
        CatalystTraitForRequestProxyBase
        CatalystViewDownload
        CatalystViewJSON
        CatalystViewTT
        CatalystXRoleApplicator
        CatalystXScriptServerStarman
        CryptPassphrase
        CryptPassphraseArgon2
        CryptRandPasswd
        DataDump
        DateTime
        DBDPg
        DBDSQLite
        DigestSHA1
        EmailMIME
        EmailSender
        FileLibMagic
        FileSlurper
        FileWhich
        IOCompress
        IPCRun
        IPCRun3
        JSON
        JSONMaybeXS
        JSONXS
        ListSomeUtils
        LWP
        LWPProtocolHttps
        ModulePluggable
        NetAmazonS3
        NetPrometheus
        NetStatsd
        PadWalker
        ParallelForkManager
        PerlCriticCommunity
        PrometheusTinyShared
        ReadonlyX
        SetScalar
        SQLSplitStatement
        Starman
        StringCompareConstantTime
        SysHostnameLong
        TermSizeAny
        TermReadKey
        Test2Harness
        TestPostgreSQL
        TextDiff
        TextTable
        UUID4Tiny
        YAML
        XMLSimple
      ]));
  };

  version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (rawSrc.lastModifiedDate or "19700101")}.${rawSrc.shortRev or "DIRTY"}";
in
stdenv.mkDerivation (finalAttrs: {
  pname = "hydra";
  inherit version;

  src = fileset.toSource {
    root = ./.;
    fileset = fileset.unions ([
      ./version.txt
      ./configure.ac
      ./Makefile.am
      ./src
      ./doc
      ./nixos-modules/hydra.nix
      # These are always needed to appease Automake
      ./t/Makefile.am
      ./t/jobs/config.nix.in
      ./t/jobs/declarative/project.json.in
    ] ++ lib.optionals finalAttrs.doCheck [
      ./t
      ./.perlcriticrc
      ./.yath.rc
    ]);
  };

  strictDeps = true;

  nativeBuildInputs = [
    makeWrapper
    autoreconfHook
    nukeReferences
    pkg-config
    mdbook
    nix
    perlDeps
    perl
    unzip
  ];

  buildInputs = [
    libpqxx
    openssl
    libxslt
    nix
    perlDeps
    perl
    boost
    nlohmann_json
    prometheus-cpp
  ];

  nativeCheckInputs = [
    bzip2
    darcs
    foreman
    top-git
    mercurial
    subversion
    breezy
    openldap
    postgresql_13
    pixz
  ];

  checkInputs = [
    cacert
    glibcLocales
    libressl.nc
    python3
  ];

  hydraPath = lib.makeBinPath (
    [
      subversion
      openssh
      nix
      coreutils
      findutils
      pixz
      gzip
      bzip2
      xz
      gnutar
      unzip
      git
      top-git
      mercurial
      darcs
      gnused
      breezy
    ] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
  );

  OPENLDAP_ROOT = openldap;

  shellHook = ''
    pushd $(git rev-parse --show-toplevel) >/dev/null

    PATH=$(pwd)/src/hydra-evaluator:$(pwd)/src/script:$(pwd)/src/hydra-eval-jobs:$(pwd)/src/hydra-queue-runner:$PATH
    PERL5LIB=$(pwd)/src/lib:$PERL5LIB
    export HYDRA_HOME="$(pwd)/src/"
    mkdir -p .hydra-data
    export HYDRA_DATA="$(pwd)/.hydra-data"
    export HYDRA_DBI='dbi:Pg:dbname=hydra;host=localhost;port=64444'

    popd >/dev/null
  '';

  NIX_LDFLAGS = [ "-lpthread" ];

  enableParallelBuilding = true;

  doCheck = true;

  preCheck = ''
    patchShebangs .
    export LOGNAME=''${LOGNAME:-foo}
    # set $HOME for bzr so it can create its trace file
    export HOME=$(mktemp -d)
  '';

  postInstall = ''
    mkdir -p $out/nix-support

    for i in $out/bin/*; do
        read -n 4 chars < $i
        if [[ $chars =~ ELF ]]; then continue; fi
        wrapProgram $i \
            --prefix PERL5LIB ':' $out/libexec/hydra/lib:$PERL5LIB \
            --prefix PATH ':' $out/bin:$hydraPath \
            --set HYDRA_RELEASE ${version} \
            --set HYDRA_HOME $out/libexec/hydra \
            --set NIX_RELEASE ${nix.name or "unknown"}
    done
  '';

  dontStrip = true;

  meta.description = "Build of Hydra on ${stdenv.system}";
  passthru = { inherit perlDeps nix; };
})
@@ -7,9 +7,6 @@
#include "store-api.hh"
#include "eval.hh"
#include "eval-inline.hh"
#include "eval-settings.hh"
#include "signals.hh"
#include "terminal.hh"
#include "util.hh"
#include "get-drvs.hh"
#include "globals.hh"

@@ -28,8 +25,7 @@

#include <nlohmann/json.hpp>

void check_pid_status_nonblocking(pid_t check_pid)
{
void check_pid_status_nonblocking(pid_t check_pid) {
    // Only check 'initialized' and known PID's
    if (check_pid <= 0) { return; }

@@ -56,7 +52,7 @@ using namespace nix;
static Path gcRootsDir;
static size_t maxMemorySize;

struct MyArgs : MixEvalArgs, MixCommonArgs, RootArgs
struct MyArgs : MixEvalArgs, MixCommonArgs
{
    Path releaseExpr;
    bool flake = false;

@@ -89,7 +85,7 @@ struct MyArgs : MixEvalArgs, MixCommonArgs, RootArgs

static MyArgs myArgs;

static std::string queryMetaStrings(EvalState & state, PackageInfo & drv, const std::string & name, const std::string & subAttribute)
static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std::string & name, const std::string & subAttribute)
{
    Strings res;
    std::function<void(Value & v)> rec;

@@ -97,14 +93,14 @@ static std::string queryMetaStrings(EvalState & state, PackageInfo & drv, const
    rec = [&](Value & v) {
        state.forceValue(v, noPos);
        if (v.type() == nString)
            res.emplace_back(v.string_view());
            res.push_back(v.string.s);
        else if (v.isList())
            for (unsigned int n = 0; n < v.listSize(); ++n)
                rec(*v.listElems()[n]);
        else if (v.type() == nAttrs) {
            auto a = v.attrs->find(state.symbols.create(subAttribute));
            if (a != v.attrs->end())
                res.push_back(std::string(state.forceString(*a->value, a->pos, "while evaluating meta attributes")));
                res.push_back(std::string(state.forceString(*a->value)));
        }
    };

@@ -178,11 +174,7 @@ static void worker(

    if (auto drv = getDerivation(state, *v, false)) {

        // CA derivations do not have static output paths, so we
        // have to defensively not query output paths in case we
        // encounter one.
        PackageInfo::Outputs outputs = drv->queryOutputs(
            !experimentalFeatureSettings.isEnabled(Xp::CaDerivations));
        DrvInfo::Outputs outputs = drv->queryOutputs();

        if (drv->querySystem() == "unknown")
            throw EvalError("derivation must have a 'system' attribute");

@@ -205,30 +197,26 @@

        /* If this is an aggregate, then get its constituents. */
        auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
        if (a && state.forceBool(*a->value, a->pos, "while evaluating the `_hydraAggregate` attribute")) {
        if (a && state.forceBool(*a->value, a->pos)) {
            auto a = v->attrs->get(state.symbols.create("constituents"));
            if (!a)
                throw EvalError("derivation must have a ‘constituents’ attribute");

            NixStringContext context;
            state.coerceToString(a->pos, *a->value, context, "while evaluating the `constituents` attribute", true, false);
            for (auto & c : context)
                std::visit(overloaded {
                    [&](const NixStringContextElem::Built & b) {
                        job["constituents"].push_back(b.drvPath->to_string(*state.store));
                    },
                    [&](const NixStringContextElem::Opaque & o) {
                    },
                    [&](const NixStringContextElem::DrvDeep & d) {
                    },
                }, c.raw);

            state.forceList(*a->value, a->pos, "while evaluating the `constituents` attribute");
            PathSet context;
            state.coerceToString(a->pos, *a->value, context, true, false);
            for (auto & i : context)
                if (i.at(0) == '!') {
                    size_t index = i.find("!", 1);
                    job["constituents"].push_back(std::string(i, index + 1));
                }

            state.forceList(*a->value, a->pos);
            for (unsigned int n = 0; n < a->value->listSize(); ++n) {
                auto v = a->value->listElems()[n];
                state.forceValue(*v, noPos);
                if (v->type() == nString)
                    job["namedConstituents"].push_back(v->string_view());
                    job["namedConstituents"].push_back(state.forceStringNoCtx(*v));
            }
        }

@@ -243,17 +231,12 @@
        }

        nlohmann::json out;
        for (auto & [outputName, optOutputPath] : outputs) {
            if (optOutputPath) {
                out[outputName] = state.store->printStorePath(*optOutputPath);
            } else {
                // See the `queryOutputs` call above; we should
                // not encounter missing output paths otherwise.
                assert(experimentalFeatureSettings.isEnabled(Xp::CaDerivations));
                out[outputName] = nullptr;
            }
        }
        for (auto & j : outputs)
            // FIXME: handle CA/impure builds.
            if (j.second)
                out[j.first] = state.store->printStorePath(*j.second);
        job["outputs"] = std::move(out);

        reply["job"] = std::move(job);
    }

@@ -262,7 +245,7 @@
        StringSet ss;
        for (auto & i : v->attrs->lexicographicOrder(state.symbols)) {
            std::string name(state.symbols[i->name]);
            if (name.find(' ') != std::string::npos) {
            if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) {
                printError("skipping job with illegal name '%s'", name);
                continue;
            }

@@ -433,11 +416,7 @@ int main(int argc, char * * argv)

    if (response.find("attrs") != response.end()) {
        for (auto & i : response["attrs"]) {
            std::string path = i;
            if (path.find(".") != std::string::npos){
                path = "\"" + path + "\"";
            }
            auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) path;
            auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i;
            newAttrs.insert(s);
        }
    }

@@ -528,7 +507,7 @@ int main(int argc, char * * argv)
    auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]);
    auto drv2 = store->readDerivation(drvPath2);
    job["constituents"].push_back(store->printStorePath(drvPath2));
    drv.inputDrvs.map[drvPath2].value = {drv2.outputs.begin()->first};
    drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first};
}

if (brokenJobs.empty()) {
@@ -2,7 +2,6 @@
#include "hydra-config.hh"
#include "pool.hh"
#include "shared.hh"
#include "signals.hh"

#include <algorithm>
#include <thread>

@@ -367,9 +366,6 @@ struct Evaluator
            printInfo("received jobset event");
        }

    } catch (pqxx::broken_connection & e) {
        printError("Database connection broken: %s", e.what());
        std::_Exit(1);
    } catch (std::exception & e) {
        printError("exception in database monitor thread: %s", e.what());
        sleep(30);

@@ -477,9 +473,6 @@ struct Evaluator
        while (true) {
            try {
                loop();
            } catch (pqxx::broken_connection & e) {
                printError("Database connection broken: %s", e.what());
                std::_Exit(1);
            } catch (std::exception & e) {
                printError("exception in main loop: %s", e.what());
                sleep(30);
|
||||
#include <fcntl.h>
|
||||
|
||||
#include "build-result.hh"
|
||||
#include "path.hh"
|
||||
#include "serve-protocol.hh"
|
||||
#include "serve-protocol-impl.hh"
|
||||
#include "state.hh"
|
||||
#include "current-process.hh"
|
||||
#include "processes.hh"
|
||||
#include "util.hh"
|
||||
#include "serve-protocol.hh"
|
||||
#include "serve-protocol-impl.hh"
|
||||
#include "ssh.hh"
|
||||
#include "worker-protocol.hh"
|
||||
#include "finally.hh"
|
||||
#include "url.hh"
|
||||
|
||||
using namespace nix;
|
||||
|
||||
namespace nix::build_remote {
|
||||
|
||||
struct Child
|
||||
{
|
||||
Pid pid;
|
||||
AutoCloseFD to, from;
|
||||
};
|
||||
|
||||
|
||||
static void append(Strings & dst, const Strings & src)
|
||||
{
|
||||
dst.insert(dst.end(), src.begin(), src.end());
|
||||
}
|
||||
|
||||
static Strings extraStoreArgs(std::string & machine)
|
||||
{
|
||||
@@ -43,28 +48,64 @@ static Strings extraStoreArgs(std::string & machine)
|
||||
return result;
|
||||
}
|
||||
|
||||
static std::unique_ptr<SSHMaster::Connection> openConnection(
|
||||
::Machine::ptr machine, SSHMaster & master)
|
||||
static void openConnection(Machine::ptr machine, Path tmpDir, int stderrFD, Child & child)
|
||||
{
|
||||
Strings command = {"nix-store", "--serve", "--write"};
|
||||
std::string pgmName;
|
||||
Pipe to, from;
|
||||
to.create();
|
||||
from.create();
|
||||
|
||||
Strings argv;
|
||||
if (machine->isLocalhost()) {
|
||||
command.push_back("--builders");
|
||||
command.push_back("");
|
||||
pgmName = "nix-store";
|
||||
argv = {"nix-store", "--builders", "", "--serve", "--write"};
|
||||
} else {
|
||||
command.splice(command.end(), extraStoreArgs(machine->sshName));
|
||||
pgmName = "ssh";
|
||||
auto sshName = machine->sshName;
|
||||
Strings extraArgs = extraStoreArgs(sshName);
|
||||
argv = {"ssh", sshName};
|
||||
if (machine->sshKey != "") append(argv, {"-i", machine->sshKey});
|
||||
if (machine->sshPublicHostKey != "") {
|
||||
Path fileName = tmpDir + "/host-key";
|
||||
auto p = machine->sshName.find("@");
|
||||
std::string host = p != std::string::npos ? std::string(machine->sshName, p + 1) : machine->sshName;
|
||||
writeFile(fileName, host + " " + machine->sshPublicHostKey + "\n");
|
||||
append(argv, {"-oUserKnownHostsFile=" + fileName});
|
||||
}
|
||||
append(argv,
|
||||
{ "-x", "-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
|
||||
, "--", "nix-store", "--serve", "--write" });
|
||||
append(argv, extraArgs);
|
||||
}
|
||||
|
||||
return master.startCommand(std::move(command), {
|
||||
"-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
|
||||
child.pid = startProcess([&]() {
|
||||
restoreProcessContext();
|
||||
|
||||
if (dup2(to.readSide.get(), STDIN_FILENO) == -1)
|
||||
throw SysError("cannot dup input pipe to stdin");
|
||||
|
||||
if (dup2(from.writeSide.get(), STDOUT_FILENO) == -1)
|
||||
throw SysError("cannot dup output pipe to stdout");
|
||||
|
||||
if (dup2(stderrFD, STDERR_FILENO) == -1)
|
||||
throw SysError("cannot dup stderr");
|
||||
|
||||
execvp(argv.front().c_str(), (char * *) stringsToCharPtrs(argv).data()); // FIXME: remove cast
|
||||
|
||||
throw SysError("cannot start %s", pgmName);
|
||||
});
|
||||
|
||||
to.readSide = -1;
|
||||
from.writeSide = -1;
|
||||
|
||||
child.to = to.writeSide.release();
|
||||
child.from = from.readSide.release();
|
||||
}
|
||||
|
||||
|
||||
static void copyClosureTo(
|
||||
::Machine::Connection & conn,
|
||||
Store & destStore,
|
||||
const StorePathSet & paths,
|
||||
SubstituteFlag useSubstitutes = NoSubstitute)
|
||||
static void copyClosureTo(std::timed_mutex & sendMutex, Store & destStore,
|
||||
FdSource & from, FdSink & to, const StorePathSet & paths,
|
||||
bool useSubstitutes = false)
|
||||
{
|
||||
StorePathSet closure;
|
||||
destStore.computeFSClosure(paths, closure);
|
||||
@@ -74,10 +115,13 @@ static void copyClosureTo(
|
||||
garbage-collect paths that are already there. Optionally, ask
|
||||
the remote host to substitute missing paths. */
|
||||
// FIXME: substitute output pollutes our build log
|
||||
to << cmdQueryValidPaths << 1 << useSubstitutes;
|
||||
worker_proto::write(destStore, to, closure);
|
||||
to.flush();
|
||||
|
||||
/* Get back the set of paths that are already valid on the remote
|
||||
host. */
|
||||
auto present = conn.queryValidPaths(
|
||||
destStore, true, closure, useSubstitutes);
|
||||
auto present = worker_proto::read(destStore, from, Phantom<StorePathSet> {});
|
||||
|
||||
if (present.size() == closure.size()) return;
|
||||
|
||||
@@ -89,20 +133,20 @@ static void copyClosureTo(
|
||||
|
||||
printMsg(lvlDebug, "sending %d missing paths", missing.size());
|
||||
|
||||
std::unique_lock<std::timed_mutex> sendLock(conn.machine->state->sendLock,
|
||||
std::unique_lock<std::timed_mutex> sendLock(sendMutex,
|
||||
std::chrono::seconds(600));
|
||||
|
||||
conn.to << ServeProto::Command::ImportPaths;
|
||||
destStore.exportPaths(missing, conn.to);
|
||||
conn.to.flush();
|
||||
to << cmdImportPaths;
|
||||
destStore.exportPaths(missing, to);
|
||||
to.flush();
|
||||
|
||||
if (readInt(conn.from) != 1)
|
||||
if (readInt(from) != 1)
|
||||
throw Error("remote machine failed to import closure");
|
||||
}
|
||||
|
||||
|
||||
// FIXME: use Store::topoSortPaths().
|
||||
static StorePaths reverseTopoSortPaths(const std::map<StorePath, UnkeyedValidPathInfo> & paths)
|
||||
StorePaths reverseTopoSortPaths(const std::map<StorePath, ValidPathInfo> & paths)
|
||||
{
|
||||
StorePaths sorted;
|
||||
StorePathSet visited;
|
||||
@@ -130,311 +174,40 @@ static StorePaths reverseTopoSortPaths(const std::map<StorePath, UnkeyedValidPat
|
||||
return sorted;
|
||||
}
|
||||
|
||||
static std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
|
||||
{
|
||||
std::string base(drvPath.to_string());
|
||||
auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
|
||||
|
||||
createDirs(dirOf(logFile));
|
||||
|
||||
AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
|
||||
if (!logFD) throw SysError("creating log file ‘%s’", logFile);
|
||||
|
||||
return {std::move(logFile), std::move(logFD)};
|
||||
}
|
||||
|
||||
static BasicDerivation sendInputs(
|
||||
State & state,
|
||||
Step & step,
|
||||
Store & localStore,
|
||||
Store & destStore,
|
||||
::Machine::Connection & conn,
|
||||
unsigned int & overhead,
|
||||
counter & nrStepsWaiting,
|
||||
counter & nrStepsCopyingTo
|
||||
)
|
||||
{
|
||||
/* Replace the input derivations by their output paths to send a
|
||||
minimal closure to the builder.
|
||||
|
||||
`tryResolve` currently does *not* rewrite input addresses, so it
|
||||
is safe to do this in all cases. (It should probably have a mode
|
||||
to do that, however, but we would not use it here.)
|
||||
*/
|
||||
BasicDerivation basicDrv = ({
|
||||
auto maybeBasicDrv = step.drv->tryResolve(destStore, &localStore);
|
||||
if (!maybeBasicDrv)
|
||||
throw Error(
|
||||
"the derivation '%s' can’t be resolved. It’s probably "
|
||||
"missing some outputs",
|
||||
localStore.printStorePath(step.drvPath));
|
||||
*maybeBasicDrv;
|
||||
});
|
||||
|
||||
/* Ensure that the inputs exist in the destination store. This is
|
||||
a no-op for regular stores, but for the binary cache store,
|
||||
this will copy the inputs to the binary cache from the local
|
||||
store. */
|
||||
if (&localStore != &destStore) {
|
||||
copyClosure(localStore, destStore,
|
||||
step.drv->inputSrcs,
|
||||
NoRepair, NoCheckSigs, NoSubstitute);
|
||||
}
|
||||
|
||||
{
|
||||
auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
|
||||
mc1.reset();
|
||||
MaintainCount<counter> mc2(nrStepsCopyingTo);
|
||||
|
||||
printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
|
||||
localStore.printStorePath(step.drvPath), conn.machine->sshName);
|
||||
|
||||
auto now1 = std::chrono::steady_clock::now();
|
||||
|
||||
/* Copy the input closure. */
|
||||
if (conn.machine->isLocalhost()) {
|
||||
StorePathSet closure;
|
||||
destStore.computeFSClosure(basicDrv.inputSrcs, closure);
|
||||
copyPaths(destStore, localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
|
||||
} else {
|
||||
copyClosureTo(conn, destStore, basicDrv.inputSrcs, Substitute);
|
||||
}
|
||||
|
||||
auto now2 = std::chrono::steady_clock::now();
|
||||
|
||||
overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
|
||||
}
|
||||
|
||||
return basicDrv;
|
||||
}
|
||||
|
||||
static BuildResult performBuild(
|
||||
::Machine::Connection & conn,
|
||||
Store & localStore,
|
||||
StorePath drvPath,
|
||||
const BasicDerivation & drv,
|
||||
const ServeProto::BuildOptions & options,
|
||||
counter & nrStepsBuilding
|
||||
)
|
||||
{
|
||||
conn.putBuildDerivationRequest(localStore, drvPath, drv, options);
|
||||
|
||||
BuildResult result;
|
||||
|
||||
time_t startTime, stopTime;
|
||||
|
||||
startTime = time(0);
|
||||
{
|
||||
MaintainCount<counter> mc(nrStepsBuilding);
|
||||
result = ServeProto::Serialise<BuildResult>::read(localStore, conn);
|
||||
}
|
||||
stopTime = time(0);
|
||||
|
||||
if (!result.startTime) {
|
||||
// If the builder gave `startTime = 0`, use our measurements
|
||||
// instead of the builder's.
|
||||
//
|
||||
// Note: this represents the duration of a single round, rather
|
||||
// than all rounds.
|
||||
result.startTime = startTime;
|
||||
result.stopTime = stopTime;
|
||||
}
|
||||
|
||||
// If the protocol was too old to give us `builtOutputs`, initialize
|
||||
// it manually by introspecting the derivation.
|
||||
if (GET_PROTOCOL_MINOR(conn.remoteVersion) < 6)
|
||||
{
|
||||
// If the remote is too old to handle CA derivations, we can’t get this
|
||||
// far anyways
|
||||
assert(drv.type().hasKnownOutputPaths());
|
||||
DerivationOutputsAndOptPaths drvOutputs = drv.outputsAndOptPaths(localStore);
|
||||
// Since this a `BasicDerivation`, `staticOutputHashes` will not
|
||||
// do any real work.
|
||||
auto outputHashes = staticOutputHashes(localStore, drv);
|
||||
for (auto & [outputName, output] : drvOutputs) {
|
||||
auto outputPath = output.second;
|
||||
// We’ve just asserted that the output paths of the derivation
|
||||
// were known
|
||||
assert(outputPath);
|
||||
auto outputHash = outputHashes.at(outputName);
|
||||
auto drvOutput = DrvOutput { outputHash, outputName };
|
||||
result.builtOutputs.insert_or_assign(
|
||||
std::move(outputName),
|
||||
Realisation { drvOutput, *outputPath });
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static std::map<StorePath, UnkeyedValidPathInfo> queryPathInfos(
|
||||
::Machine::Connection & conn,
|
||||
Store & localStore,
|
||||
StorePathSet & outputs,
|
||||
size_t & totalNarSize
|
||||
)
|
||||
{
|
||||
|
||||
/* Get info about each output path. */
|
||||
std::map<StorePath, UnkeyedValidPathInfo> infos;
|
||||
conn.to << ServeProto::Command::QueryPathInfos;
|
||||
ServeProto::write(localStore, conn, outputs);
|
||||
conn.to.flush();
|
||||
while (true) {
|
||||
auto storePathS = readString(conn.from);
|
||||
if (storePathS == "") break;
|
||||
|
||||
auto storePath = localStore.parseStorePath(storePathS);
|
||||
auto info = ServeProto::Serialise<UnkeyedValidPathInfo>::read(localStore, conn);
|
||||
totalNarSize += info.narSize;
|
||||
infos.insert_or_assign(std::move(storePath), std::move(info));
|
||||
}
|
||||
|
||||
return infos;
|
||||
}
|
||||
|
||||
static void copyPathFromRemote(
|
||||
::Machine::Connection & conn,
|
||||
NarMemberDatas & narMembers,
|
||||
Store & localStore,
|
||||
Store & destStore,
|
||||
const ValidPathInfo & info
|
||||
)
|
||||
{
|
||||
/* Receive the NAR from the remote and add it to the
|
||||
destination store. Meanwhile, extract all the info from the
|
||||
NAR that getBuildOutput() needs. */
|
||||
auto source2 = sinkToSource([&](Sink & sink)
|
||||
{
|
||||
/* Note: we should only send the command to dump the store
|
||||
path to the remote if the NAR is actually going to get read
|
||||
by the destination store, which won't happen if this path
|
||||
is already valid on the destination store. Since this
|
||||
lambda function only gets executed if someone tries to read
|
||||
from source2, we will send the command from here rather
|
||||
than outside the lambda. */
|
||||
conn.to << ServeProto::Command::DumpStorePath << localStore.printStorePath(info.path);
|
||||
conn.to.flush();
|
||||
|
||||
TeeSource tee(conn.from, sink);
|
||||
extractNarData(tee, localStore.printStorePath(info.path), narMembers);
|
||||
});
|
||||
|
||||
destStore.addToStore(info, *source2, NoRepair, NoCheckSigs);
|
||||
}
|
||||
|
||||
static void copyPathsFromRemote(
|
||||
::Machine::Connection & conn,
|
||||
NarMemberDatas & narMembers,
|
||||
Store & localStore,
|
||||
Store & destStore,
|
||||
const std::map<StorePath, UnkeyedValidPathInfo> & infos
|
||||
)
|
||||
{
|
||||
auto pathsSorted = reverseTopoSortPaths(infos);
|
||||
|
||||
for (auto & path : pathsSorted) {
|
||||
auto & info = infos.find(path)->second;
|
||||
copyPathFromRemote(
|
||||
conn, narMembers, localStore, destStore,
|
||||
ValidPathInfo { path, info });
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* using namespace nix::build_remote; */
|
||||
|
||||
void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
|
||||
{
|
||||
startTime = buildResult.startTime;
|
||||
stopTime = buildResult.stopTime;
|
||||
timesBuilt = buildResult.timesBuilt;
|
||||
errorMsg = buildResult.errorMsg;
|
||||
isNonDeterministic = buildResult.isNonDeterministic;
|
||||
|
||||
switch ((BuildResult::Status) buildResult.status) {
|
||||
case BuildResult::Built:
|
||||
stepStatus = bsSuccess;
|
||||
break;
|
||||
case BuildResult::Substituted:
|
||||
case BuildResult::AlreadyValid:
|
||||
stepStatus = bsSuccess;
|
||||
isCached = true;
|
||||
break;
|
||||
case BuildResult::PermanentFailure:
|
||||
stepStatus = bsFailed;
|
||||
canCache = true;
|
||||
errorMsg = "";
|
||||
break;
|
||||
case BuildResult::InputRejected:
|
||||
case BuildResult::OutputRejected:
|
||||
stepStatus = bsFailed;
|
||||
canCache = true;
|
||||
break;
|
||||
case BuildResult::TransientFailure:
|
||||
stepStatus = bsFailed;
|
||||
canRetry = true;
|
||||
errorMsg = "";
|
||||
break;
|
||||
case BuildResult::TimedOut:
|
||||
stepStatus = bsTimedOut;
|
||||
errorMsg = "";
|
||||
break;
|
||||
case BuildResult::MiscFailure:
|
||||
stepStatus = bsAborted;
|
||||
canRetry = true;
|
||||
break;
|
||||
case BuildResult::LogLimitExceeded:
|
||||
stepStatus = bsLogLimitExceeded;
|
||||
break;
|
||||
case BuildResult::NotDeterministic:
|
||||
stepStatus = bsNotDeterministic;
|
||||
canRetry = false;
|
||||
canCache = true;
|
||||
break;
|
||||
default:
|
||||
stepStatus = bsAborted;
|
||||
break;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
void State::buildRemote(ref<Store> destStore,
|
||||
::Machine::ptr machine, Step::ptr step,
|
||||
const ServeProto::BuildOptions & buildOptions,
|
||||
Machine::ptr machine, Step::ptr step,
|
||||
unsigned int maxSilentTime, unsigned int buildTimeout, unsigned int repeats,
|
||||
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
|
||||
std::function<void(StepState)> updateStep,
|
||||
NarMemberDatas & narMembers)
|
||||
{
|
||||
assert(BuildResult::TimedOut == 8);
|
||||
|
||||
auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath);
|
||||
AutoDelete logFileDel(logFile, false);
|
||||
result.logFile = logFile;
|
||||
std::string base(step->drvPath.to_string());
|
||||
result.logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
|
||||
AutoDelete autoDelete(result.logFile, false);
|
||||
|
||||
createDirs(dirOf(result.logFile));
|
||||
|
||||
AutoCloseFD logFD = open(result.logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
|
||||
if (!logFD) throw SysError("creating log file ‘%s’", result.logFile);
|
||||
|
||||
nix::Path tmpDir = createTempDir();
|
||||
AutoDelete tmpDirDel(tmpDir, true);
|
||||
|
||||
try {
|
||||
|
||||
updateStep(ssConnecting);
|
||||
|
||||
SSHMaster master {
|
||||
machine->sshName,
|
||||
machine->sshKey,
|
||||
machine->sshPublicHostKey,
|
||||
false, // no SSH master yet
|
||||
false, // no compression yet
|
||||
logFD.get(),
|
||||
};
|
||||
|
||||
// FIXME: rewrite to use Store.
|
||||
auto child = build_remote::openConnection(machine, master);
|
||||
Child child;
|
||||
openConnection(machine, tmpDir, logFD.get(), child);
|
||||
|
||||
{
|
||||
auto activeStepState(activeStep->state_.lock());
|
||||
if (activeStepState->cancelled) throw Error("step cancelled");
|
||||
activeStepState->pid = child->sshPid;
|
||||
activeStepState->pid = child.pid;
|
||||
}

Finally clearPid([&]() {
@@ -449,41 +222,36 @@ void State::buildRemote(ref<Store> destStore,
process. Meh. */
});

::Machine::Connection conn {
{
.to = child->in.get(),
.from = child->out.get(),
/* Handshake. */
.remoteVersion = 0xdadbeef, // FIXME avoid dummy initialize
},
/*.machine =*/ machine,
};
FdSource from(child.from.get());
FdSink to(child.to.get());

Finally updateStats([&]() {
bytesReceived += conn.from.read;
bytesSent += conn.to.written;
bytesReceived += from.read;
bytesSent += to.written;
});

constexpr ServeProto::Version our_version = 0x206;
/* Handshake. */
unsigned int remoteVersion;

try {
conn.remoteVersion = decltype(conn)::handshake(
conn.to,
conn.from,
our_version,
machine->sshName);
to << SERVE_MAGIC_1 << 0x206;
to.flush();

unsigned int magic = readInt(from);
if (magic != SERVE_MAGIC_2)
throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%1%’", machine->sshName);
remoteVersion = readInt(from);
if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%1%’", machine->sshName);
if (GET_PROTOCOL_MINOR(remoteVersion) < 3 && repeats > 0)
throw Error("machine ‘%1%’ does not support repeating a build; please upgrade it to Nix 1.12", machine->sshName);

} catch (EndOfFile & e) {
child->sshPid.wait();
child.pid.wait();
std::string s = chomp(readFile(result.logFile));
throw Error("cannot connect to ‘%1%’: %2%", machine->sshName, s);
}

// Do not attempt to speak a newer version of the protocol.
//
// Per https://github.com/NixOS/nix/issues/9584 should be handled as
// part of `handshake` in upstream nix.
conn.remoteVersion = std::min(conn.remoteVersion, our_version);
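For reference, a minimal sketch of how these protocol words decode, assuming the usual Nix definitions of `GET_PROTOCOL_MAJOR`/`GET_PROTOCOL_MINOR` (major in the high byte, minor in the low byte):

```cpp
// Sketch only: 0x206 is the version both sides of this code advertise.
constexpr unsigned int version = 0x206;
static_assert((version & 0xff00) == 0x200); // GET_PROTOCOL_MAJOR -> 0x200
static_assert((version & 0x00ff) == 6);     // GET_PROTOCOL_MINOR -> 6
// Taking min() of the two advertised versions keeps the session at the
// highest level both the queue runner and the remote serve process support.
```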

{
auto info(machine->state->connectInfo.lock());
info->consecutiveFailures = 0;
@@ -495,12 +263,62 @@ void State::buildRemote(ref<Store> destStore,
copy the immediate sources of the derivation and the required
outputs of the input derivations. */
updateStep(ssSendingInputs);
BasicDerivation resolvedDrv = build_remote::sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo);

logFileDel.cancel();
StorePathSet inputs;
BasicDerivation basicDrv(*step->drv);

for (auto & p : step->drv->inputSrcs)
inputs.insert(p);

for (auto & input : step->drv->inputDrvs) {
auto drv2 = localStore->readDerivation(input.first);
for (auto & name : input.second) {
if (auto i = get(drv2.outputs, name)) {
auto outPath = i->path(*localStore, drv2.name, name);
inputs.insert(*outPath);
basicDrv.inputSrcs.insert(*outPath);
}
}
}

/* Ensure that the inputs exist in the destination store. This is
a no-op for regular stores, but for the binary cache store,
this will copy the inputs to the binary cache from the local
store. */
if (localStore != std::shared_ptr<Store>(destStore)) {
copyClosure(*localStore, *destStore,
step->drv->inputSrcs,
NoRepair, NoCheckSigs, NoSubstitute);
}

{
auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
mc1.reset();
MaintainCount<counter> mc2(nrStepsCopyingTo);

printMsg(lvlDebug, "sending closure of ‘%s’ to ‘%s’",
localStore->printStorePath(step->drvPath), machine->sshName);

auto now1 = std::chrono::steady_clock::now();

/* Copy the input closure. */
if (machine->isLocalhost()) {
StorePathSet closure;
destStore->computeFSClosure(inputs, closure);
copyPaths(*destStore, *localStore, closure, NoRepair, NoCheckSigs, NoSubstitute);
} else {
copyClosureTo(machine->state->sendLock, *destStore, from, to, inputs, true);
}

auto now2 = std::chrono::steady_clock::now();

result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
}
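The `mc1.reset()` dance above relies on `MaintainCount` being an RAII counter: the constructor bumps a gauge and the destructor undoes it, so releasing the waiting-counter guard before constructing the copying-counter guard moves the step from one statistic to the other. A minimal sketch of the idea (the real class lives in Hydra's headers; this is an assumption-level reconstruction):

```cpp
// Sketch of the presumed MaintainCount semantics, not the actual definition.
template<typename T>
struct MaintainCountSketch
{
    T & counter;
    MaintainCountSketch(T & counter) : counter(counter) { counter++; }
    ~MaintainCountSketch() { counter--; }
};
```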

autoDelete.cancel();

/* Truncate the log to get rid of messages about substitutions
etc. on the remote system. */
if (lseek(logFD.get(), 0, SEEK_SET) != 0)
throw SysError("seeking to the start of log file ‘%s’", result.logFile);

@@ -516,17 +334,85 @@ void State::buildRemote(ref<Store> destStore,

updateStep(ssBuilding);

BuildResult buildResult = build_remote::performBuild(
conn,
*localStore,
step->drvPath,
resolvedDrv,
buildOptions,
nrStepsBuilding
);
to << cmdBuildDerivation << localStore->printStorePath(step->drvPath);
writeDerivation(to, *localStore, basicDrv);
to << maxSilentTime << buildTimeout;
if (GET_PROTOCOL_MINOR(remoteVersion) >= 2)
to << maxLogSize;
if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
to << repeats // == build-repeat
<< step->isDeterministic; // == enforce-determinism
}
to.flush();

result.updateWithBuildResult(buildResult);
result.startTime = time(0);
int res;
{
MaintainCount<counter> mc(nrStepsBuilding);
res = readInt(from);
}
result.stopTime = time(0);

result.errorMsg = readString(from);
if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
result.timesBuilt = readInt(from);
result.isNonDeterministic = readInt(from);
auto start = readInt(from);
auto stop = readInt(from);
if (start && stop) {
/* Note: this represents the duration of a single
round, rather than all rounds. */
result.startTime = start;
result.stopTime = stop;
}
}
if (GET_PROTOCOL_MINOR(remoteVersion) >= 6) {
worker_proto::read(*localStore, from, Phantom<DrvOutputs> {});
}
switch ((BuildResult::Status) res) {
case BuildResult::Built:
result.stepStatus = bsSuccess;
break;
case BuildResult::Substituted:
case BuildResult::AlreadyValid:
result.stepStatus = bsSuccess;
result.isCached = true;
break;
case BuildResult::PermanentFailure:
result.stepStatus = bsFailed;
result.canCache = true;
result.errorMsg = "";
break;
case BuildResult::InputRejected:
case BuildResult::OutputRejected:
result.stepStatus = bsFailed;
result.canCache = true;
break;
case BuildResult::TransientFailure:
result.stepStatus = bsFailed;
result.canRetry = true;
result.errorMsg = "";
break;
case BuildResult::TimedOut:
result.stepStatus = bsTimedOut;
result.errorMsg = "";
break;
case BuildResult::MiscFailure:
result.stepStatus = bsAborted;
result.canRetry = true;
break;
case BuildResult::LogLimitExceeded:
result.stepStatus = bsLogLimitExceeded;
break;
case BuildResult::NotDeterministic:
result.stepStatus = bsNotDeterministic;
result.canRetry = false;
result.canCache = true;
break;
default:
result.stepStatus = bsAborted;
break;
}
if (result.stepStatus != bsSuccess) return;

result.errorMsg = "";
@@ -540,10 +426,6 @@ void State::buildRemote(ref<Store> destStore,
result.logFile = "";
}

StorePathSet outputs;
for (auto & [_, realisation] : buildResult.builtOutputs)
outputs.insert(realisation.outPath);

/* Copy the output paths. */
if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
updateStep(ssReceivingOutputs);
@@ -552,8 +434,39 @@ void State::buildRemote(ref<Store> destStore,

auto now1 = std::chrono::steady_clock::now();

StorePathSet outputs;
for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
if (i.second.second)
outputs.insert(*i.second.second);
}

/* Get info about each output path. */
std::map<StorePath, ValidPathInfo> infos;
size_t totalNarSize = 0;
auto infos = build_remote::queryPathInfos(conn, *localStore, outputs, totalNarSize);
to << cmdQueryPathInfos;
worker_proto::write(*localStore, to, outputs);
to.flush();
while (true) {
auto storePathS = readString(from);
if (storePathS == "") break;
auto deriver = readString(from); // deriver
auto references = worker_proto::read(*localStore, from, Phantom<StorePathSet> {});
readLongLong(from); // download size
auto narSize = readLongLong(from);
auto narHash = Hash::parseAny(readString(from), htSHA256);
auto ca = parseContentAddressOpt(readString(from));
readStrings<StringSet>(from); // sigs
ValidPathInfo info(localStore->parseStorePath(storePathS), narHash);
assert(outputs.count(info.path));
info.references = references;
info.narSize = narSize;
totalNarSize += info.narSize;
info.narHash = narHash;
info.ca = ca;
if (deriver != "")
info.deriver = localStore->parseStorePath(deriver);
infos.insert_or_assign(info.path, info);
}

if (totalNarSize > maxOutputSize) {
result.stepStatus = bsNarSizeLimitExceeded;
@@ -564,30 +477,41 @@ void State::buildRemote(ref<Store> destStore,
printMsg(lvlDebug, "copying outputs of ‘%s’ from ‘%s’ (%d bytes)",
localStore->printStorePath(step->drvPath), machine->sshName, totalNarSize);

build_remote::copyPathsFromRemote(conn, narMembers, *localStore, *destStore, infos);
auto pathsSorted = reverseTopoSortPaths(infos);

for (auto & path : pathsSorted) {
auto & info = infos.find(path)->second;

/* Receive the NAR from the remote and add it to the
destination store. Meanwhile, extract all the info from the
NAR that getBuildOutput() needs. */
auto source2 = sinkToSource([&](Sink & sink)
{
/* Note: we should only send the command to dump the store
path to the remote if the NAR is actually going to get read
by the destination store, which won't happen if this path
is already valid on the destination store. Since this
lambda function only gets executed if someone tries to read
from source2, we will send the command from here rather
than outside the lambda. */
to << cmdDumpStorePath << localStore->printStorePath(path);
to.flush();

TeeSource tee(from, sink);
extractNarData(tee, localStore->printStorePath(path), narMembers);
});

destStore->addToStore(info, *source2, NoRepair, NoCheckSigs);
}
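`sinkToSource` inverts control here: the lambda only runs once `addToStore` actually pulls bytes from `source2`, which is what makes the "send the dump command lazily" trick work. A small self-contained illustration of that laziness (using the nix `sinkToSource` helper; the payload is made up):

```cpp
#include "serialise.hh"

void sinkToSourceDemo(nix::Sink & out)
{
    auto src = nix::sinkToSource([](nix::Sink & sink) {
        sink("hello"); // not executed until somebody reads from `src`
    });
    src->drainInto(out); // only now does the lambda run
}
```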

auto now2 = std::chrono::steady_clock::now();

result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
}

/* Register the outputs of the newly built drv */
if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
auto outputHashes = staticOutputHashes(*localStore, *step->drv);
for (auto & [outputName, realisation] : buildResult.builtOutputs) {
// Register the resolved drv output
destStore->registerDrvOutput(realisation);

// Also register the unresolved one
auto unresolvedRealisation = realisation;
unresolvedRealisation.signatures.clear();
unresolvedRealisation.id.drvHash = outputHashes.at(outputName);
destStore->registerDrvOutput(unresolvedRealisation);
}
}

/* Shut down the connection. */
child->in = -1;
child->sshPid.wait();
child.to = -1;
child.pid.wait();

} catch (Error & e) {
/* Disable this machine until a certain period of time has

@@ -1,7 +1,7 @@
#include "hydra-build-result.hh"
#include "store-api.hh"
#include "util.hh"
#include "source-accessor.hh"
#include "fs-accessor.hh"

#include <regex>

@@ -11,18 +11,18 @@ using namespace nix;
BuildOutput getBuildOutput(
nix::ref<Store> store,
NarMemberDatas & narMembers,
const OutputPathMap derivationOutputs)
const Derivation & drv)
{
BuildOutput res;

/* Compute the closure size. */
StorePathSet outputs;
StorePathSet closure;
for (auto& [outputName, outputPath] : derivationOutputs) {
store->computeFSClosure(outputPath, closure);
outputs.insert(outputPath);
res.outputs.insert({outputName, outputPath});
}
for (auto & i : drv.outputsAndOptPaths(*store))
if (i.second.second) {
store->computeFSClosure(*i.second.second, closure);
outputs.insert(*i.second.second);
}
for (auto & path : closure) {
auto info = store->queryPathInfo(path);
res.closureSize += info->narSize;
@@ -63,7 +63,7 @@ BuildOutput getBuildOutput(

auto productsFile = narMembers.find(outputS + "/nix-support/hydra-build-products");
if (productsFile == narMembers.end() ||
productsFile->second.type != SourceAccessor::Type::tRegular)
productsFile->second.type != FSAccessor::Type::tRegular)
continue;
assert(productsFile->second.contents);

@@ -94,7 +94,7 @@ BuildOutput getBuildOutput(

product.name = product.path == store->printStorePath(output) ? "" : baseNameOf(product.path);

if (file->second.type == SourceAccessor::Type::tRegular) {
if (file->second.type == FSAccessor::Type::tRegular) {
product.isRegular = true;
product.fileSize = file->second.fileSize.value();
product.sha256hash = file->second.sha256.value();
@@ -107,16 +107,17 @@ BuildOutput getBuildOutput(
/* If no build products were explicitly declared, then add all
outputs as a product of type "nix-build". */
if (!explicitProducts) {
for (auto & [name, output] : derivationOutputs) {
for (auto & [name, output] : drv.outputs) {
BuildProduct product;
product.path = store->printStorePath(output);
auto outPath = output.path(*store, drv.name, name);
product.path = store->printStorePath(*outPath);
product.type = "nix-build";
product.subtype = name == "out" ? "" : name;
product.name = output.name();
product.name = outPath->name();

auto file = narMembers.find(product.path);
assert(file != narMembers.end());
if (file->second.type == SourceAccessor::Type::tDirectory)
if (file->second.type == FSAccessor::Type::tDirectory)
res.products.push_back(product);
}
}
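For context, the `hydra-build-products` file parsed above is plain text written by the build itself, one product per whitespace-separated `type subtype path` line (paths are already expanded by the builder). An illustrative file:

```
doc manual /nix/store/…-hydra/share/doc/manual/index.html
file binary-dist /nix/store/…-hydra/tarballs/hydra-0.1.tar.gz
```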
@@ -125,7 +126,7 @@ BuildOutput getBuildOutput(
for (auto & output : outputs) {
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-release-name");
if (file == narMembers.end() ||
file->second.type != SourceAccessor::Type::tRegular)
file->second.type != FSAccessor::Type::tRegular)
continue;
res.releaseName = trim(file->second.contents.value());
// FIXME: validate release name
@@ -135,7 +136,7 @@ BuildOutput getBuildOutput(
for (auto & output : outputs) {
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-metrics");
if (file == narMembers.end() ||
file->second.type != SourceAccessor::Type::tRegular)
file->second.type != FSAccessor::Type::tRegular)
continue;
for (auto & line : tokenizeString<Strings>(file->second.contents.value(), "\n")) {
auto fields = tokenizeString<std::vector<std::string>>(line);
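The `hydra-metrics` lines being split here are whitespace-separated `name value [unit]` records; an illustrative (made-up) file:

```
lineCoverage 87.5 %
buildTimeSeconds 42 s
```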

@@ -98,13 +98,8 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
it). */
BuildID buildId;
std::optional<StorePath> buildDrvPath;
// Other fields set below
nix::ServeProto::BuildOptions buildOptions {
.maxLogSize = maxLogSize,
.nrRepeats = step->isDeterministic ? 1u : 0u,
.enforceDeterminism = step->isDeterministic,
.keepFailed = false,
};
unsigned int maxSilentTime, buildTimeout;
unsigned int repeats = step->isDeterministic ? 1 : 0;

auto conn(dbPool.get());

@@ -139,18 +134,18 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
{
auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName));
if (i != jobsetRepeats.end())
buildOptions.nrRepeats = std::max(buildOptions.nrRepeats, i->second);
repeats = std::max(repeats, i->second);
}
}
if (!build) build = *dependents.begin();

buildId = build->id;
buildDrvPath = build->drvPath;
buildOptions.maxSilentTime = build->maxSilentTime;
buildOptions.buildTimeout = build->buildTimeout;
maxSilentTime = build->maxSilentTime;
buildTimeout = build->buildTimeout;

printInfo("performing step ‘%s’ %d times on ‘%s’ (needed by build %d and %d others)",
localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->sshName, buildId, (dependents.size() - 1));
localStore->printStorePath(step->drvPath), repeats + 1, machine->sshName, buildId, (dependents.size() - 1));
}

if (!buildOneDone)
@@ -211,7 +206,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

try {
/* FIXME: referring builds may have conflicting timeouts. */
buildRemote(destStore, machine, step, buildOptions, result, activeStep, updateStep, narMembers);
buildRemote(destStore, machine, step, maxSilentTime, buildTimeout, repeats, result, activeStep, updateStep, narMembers);
} catch (Error & e) {
if (activeStep->state_.lock()->cancelled) {
printInfo("marking step %d of build %d as cancelled", stepNr, buildId);
@@ -226,7 +221,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

if (result.stepStatus == bsSuccess) {
updateStep(ssPostProcessing);
res = getBuildOutput(destStore, narMembers, destStore->queryDerivationOutputMap(step->drvPath, &*localStore));
res = getBuildOutput(destStore, narMembers, *step->drv);
}
}

@@ -280,12 +275,9 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,

assert(stepNr);

for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(step->drvPath, &*localStore)) {
if (!optOutputPath)
throw Error(
"Missing output %s for derivation %s which was supposed to have succeeded",
outputName, localStore->printStorePath(step->drvPath));
addRoot(*optOutputPath);
for (auto & i : step->drv->outputsAndOptPaths(*localStore)) {
if (i.second.second)
addRoot(*i.second.second);
}

/* Register success in the database for all Build objects that
@@ -331,7 +323,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
pqxx::work txn(*conn);

for (auto & b : direct) {
printInfo("marking build %1% as succeeded", b->id);
printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id);
markSucceededBuild(txn, b, res, buildId != b->id || result.isCached,
result.startTime, result.stopTime);
}
@@ -406,7 +398,7 @@ void State::failStep(
Step::ptr step,
BuildID buildId,
const RemoteResult & result,
::Machine::ptr machine,
Machine::ptr machine,
bool & stepFinished)
{
/* Register failure in the database for all Build objects that
@@ -459,7 +451,7 @@ void State::failStep(
/* Mark all builds that depend on this derivation as failed. */
for (auto & build : indirect) {
if (build->finishedInDB) continue;
printError("marking build %1% as failed", build->id);
printMsg(lvlError, format("marking build %1% as failed") % build->id);
txn.exec_params0
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
build->id,

@@ -52,7 +52,7 @@ void State::dispatcher()
{
auto dispatcherWakeup_(dispatcherWakeup.lock());
if (!*dispatcherWakeup_) {
debug("dispatcher sleeping for %1%s",
printMsg(lvlDebug, format("dispatcher sleeping for %1%s") %
std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count());
dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil);
}
@@ -60,7 +60,7 @@ void State::dispatcher()
}

} catch (std::exception & e) {
printError("dispatcher: %s", e.what());
printMsg(lvlError, format("dispatcher: %1%") % e.what());
sleep(1);
}

@@ -80,118 +80,17 @@ system_time State::doDispatch()
jobset.second->pruneSteps();
auto s2 = jobset.second->shareUsed();
if (s1 != s2)
debug("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%",
jobset.first.first, jobset.first.second, s1, s2);
printMsg(lvlDebug, format("pruned scheduling window of ‘%1%:%2%’ from %3% to %4%")
% jobset.first.first % jobset.first.second % s1 % s2);
}
}

system_time now = std::chrono::system_clock::now();

/* Start steps until we're out of steps or slots. */
auto sleepUntil = system_time::max();
bool keepGoing;

/* Sort the runnable steps by priority. Priority is established
as follows (in order of precedence):

- The global priority of the builds that depend on the
step. This allows admins to bump a build to the front of
the queue.

- The lowest used scheduling share of the jobsets depending
on the step.

- The local priority of the build, as set via the build's
meta.schedulingPriority field. Note that this is not
quite correct: the local priority should only be used to
establish priority between builds in the same jobset, but
here it's used between steps in different jobsets if they
happen to have the same lowest used scheduling share. But
that's not very likely.

- The lowest ID of the builds depending on the step;
i.e. older builds take priority over new ones.

FIXME: O(n lg n); obviously, it would be better to keep a
runnable queue sorted by priority. */
struct StepInfo
{
Step::ptr step;
bool alreadyScheduled = false;

/* The lowest share used of any jobset depending on this
step. */
double lowestShareUsed = 1e9;

/* Info copied from step->state to ensure that the
comparator is a partial ordering (see MachineInfo). */
int highestGlobalPriority;
int highestLocalPriority;
BuildID lowestBuildID;

StepInfo(Step::ptr step, Step::State & step_) : step(step)
{
for (auto & jobset : step_.jobsets)
lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
highestGlobalPriority = step_.highestGlobalPriority;
highestLocalPriority = step_.highestLocalPriority;
lowestBuildID = step_.lowestBuildID;
}
};

std::vector<StepInfo> runnableSorted;

struct RunnablePerType
{
unsigned int count{0};
std::chrono::seconds waitTime{0};
};

std::unordered_map<std::string, RunnablePerType> runnablePerType;

{
auto runnable_(runnable.lock());
runnableSorted.reserve(runnable_->size());
for (auto i = runnable_->begin(); i != runnable_->end(); ) {
auto step = i->lock();

/* Remove dead steps. */
if (!step) {
i = runnable_->erase(i);
continue;
}

++i;

auto & r = runnablePerType[step->systemType];
r.count++;

/* Skip previously failed steps that aren't ready
to be retried. */
auto step_(step->state.lock());
r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
if (step_->tries > 0 && step_->after > now) {
if (step_->after < sleepUntil)
sleepUntil = step_->after;
continue;
}

runnableSorted.emplace_back(step, *step_);
}
}

sort(runnableSorted.begin(), runnableSorted.end(),
[](const StepInfo & a, const StepInfo & b)
{
return
a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
a.lowestBuildID < b.lowestBuildID;
});

do {
now = std::chrono::system_clock::now();
system_time now = std::chrono::system_clock::now();

/* Copy the currentJobs field of each machine. This is
necessary to ensure that the sort comparator below is
@@ -199,7 +98,7 @@ system_time State::doDispatch()
filter out temporarily disabled machines. */
struct MachineInfo
{
::Machine::ptr machine;
Machine::ptr machine;
unsigned long currentJobs;
};
std::vector<MachineInfo> machinesSorted;
@@ -239,6 +138,104 @@ system_time State::doDispatch()
a.currentJobs > b.currentJobs;
});

/* Sort the runnable steps by priority. Priority is established
as follows (in order of precedence):

- The global priority of the builds that depend on the
step. This allows admins to bump a build to the front of
the queue.

- The lowest used scheduling share of the jobsets depending
on the step.

- The local priority of the build, as set via the build's
meta.schedulingPriority field. Note that this is not
quite correct: the local priority should only be used to
establish priority between builds in the same jobset, but
here it's used between steps in different jobsets if they
happen to have the same lowest used scheduling share. But
that's not very likely.

- The lowest ID of the builds depending on the step;
i.e. older builds take priority over new ones.

FIXME: O(n lg n); obviously, it would be better to keep a
runnable queue sorted by priority. */
struct StepInfo
{
Step::ptr step;

/* The lowest share used of any jobset depending on this
step. */
double lowestShareUsed = 1e9;

/* Info copied from step->state to ensure that the
comparator is a partial ordering (see MachineInfo). */
int highestGlobalPriority;
int highestLocalPriority;
BuildID lowestBuildID;

StepInfo(Step::ptr step, Step::State & step_) : step(step)
{
for (auto & jobset : step_.jobsets)
lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
highestGlobalPriority = step_.highestGlobalPriority;
highestLocalPriority = step_.highestLocalPriority;
lowestBuildID = step_.lowestBuildID;
}
};

std::vector<StepInfo> runnableSorted;

struct RunnablePerType
{
unsigned int count{0};
std::chrono::seconds waitTime{0};
};

std::unordered_map<std::string, RunnablePerType> runnablePerType;

{
auto runnable_(runnable.lock());
runnableSorted.reserve(runnable_->size());
for (auto i = runnable_->begin(); i != runnable_->end(); ) {
auto step = i->lock();

/* Remove dead steps. */
if (!step) {
i = runnable_->erase(i);
continue;
}

++i;

auto & r = runnablePerType[step->systemType];
r.count++;

/* Skip previously failed steps that aren't ready
to be retried. */
auto step_(step->state.lock());
r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
if (step_->tries > 0 && step_->after > now) {
if (step_->after < sleepUntil)
sleepUntil = step_->after;
continue;
}

runnableSorted.emplace_back(step, *step_);
}
}

sort(runnableSorted.begin(), runnableSorted.end(),
[](const StepInfo & a, const StepInfo & b)
{
return
a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
a.lowestBuildID < b.lowestBuildID;
});

/* Find a machine with a free slot and find a step to run
on it. Once we find such a pair, we restart the outer
loop because the machine sorting will have changed. */
@@ -248,8 +245,6 @@ system_time State::doDispatch()
if (mi.machine->state->currentJobs >= mi.machine->maxJobs) continue;

for (auto & stepInfo : runnableSorted) {
if (stepInfo.alreadyScheduled) continue;

auto & step(stepInfo.step);

/* Can this machine do this step? */
@@ -276,8 +271,6 @@ system_time State::doDispatch()
r.count--;
}

stepInfo.alreadyScheduled = true;

/* Make a slot reservation and start a thread to
do the build. */
auto builderThread = std::thread(&State::builder, this,
@@ -435,7 +428,7 @@ void Jobset::pruneSteps()
}


State::MachineReservation::MachineReservation(State & state, Step::ptr step, ::Machine::ptr machine)
State::MachineReservation::MachineReservation(State & state, Step::ptr step, Machine::ptr machine)
: state(state), step(step), machine(machine)
{
machine->state->currentJobs++;

@@ -36,12 +36,10 @@ struct BuildOutput

std::list<BuildProduct> products;

std::map<std::string, nix::StorePath> outputs;

std::map<std::string, BuildMetric> metrics;
};

BuildOutput getBuildOutput(
nix::ref<nix::Store> store,
NarMemberDatas & narMembers,
const nix::OutputPathMap derivationOutputs);
const nix::Derivation & drv);

@@ -1,7 +1,6 @@
#include <iostream>
#include <thread>
#include <optional>
#include <type_traits>

#include <sys/types.h>
#include <sys/stat.h>
@@ -9,9 +8,6 @@

#include <prometheus/exposer.h>

#include <nlohmann/json.hpp>

#include "signals.hh"
#include "state.hh"
#include "hydra-build-result.hh"
#include "store-api.hh"
@@ -19,11 +15,20 @@

#include "globals.hh"
#include "hydra-config.hh"
#include "json.hh"
#include "s3-binary-cache-store.hh"
#include "shared.hh"

using namespace nix;
using nlohmann::json;


namespace nix {

template<> void toJSON<std::atomic<long>>(std::ostream & str, const std::atomic<long> & n) { str << n; }
template<> void toJSON<std::atomic<uint64_t>>(std::ostream & str, const std::atomic<uint64_t> & n) { str << n; }
template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }

}


std::string getEnvOrDie(const std::string & key)
@@ -141,51 +146,33 @@ void State::parseMachines(const std::string & contents)
if (tokens.size() < 3) continue;
tokens.resize(8);

if (tokens[5] == "-") tokens[5] = "";
auto supportedFeatures = tokenizeString<StringSet>(tokens[5], ",");

if (tokens[6] == "-") tokens[6] = "";
auto mandatoryFeatures = tokenizeString<StringSet>(tokens[6], ",");

for (auto & f : mandatoryFeatures)
supportedFeatures.insert(f);

using MaxJobs = std::remove_const<decltype(nix::Machine::maxJobs)>::type;

auto machine = std::make_shared<::Machine>(nix::Machine {
// `storeUri`, not yet used
"",
// `systemTypes`
tokenizeString<StringSet>(tokens[1], ","),
// `sshKey`
tokens[2] == "-" ? "" : tokens[2],
// `maxJobs`
tokens[3] != ""
? string2Int<MaxJobs>(tokens[3]).value()
: 1,
// `speedFactor`
atof(tokens[4].c_str()),
// `supportedFeatures`
std::move(supportedFeatures),
// `mandatoryFeatures`
std::move(mandatoryFeatures),
// `sshPublicHostKey`
tokens[7] != "" && tokens[7] != "-"
? tokens[7]
: "",
});

auto machine = std::make_shared<Machine>();
machine->sshName = tokens[0];
machine->systemTypes = tokenizeString<StringSet>(tokens[1], ",");
machine->sshKey = tokens[2] == "-" ? std::string("") : tokens[2];
if (tokens[3] != "")
machine->maxJobs = string2Int<decltype(machine->maxJobs)>(tokens[3]).value();
else
machine->maxJobs = 1;
machine->speedFactor = atof(tokens[4].c_str());
if (tokens[5] == "-") tokens[5] = "";
machine->supportedFeatures = tokenizeString<StringSet>(tokens[5], ",");
if (tokens[6] == "-") tokens[6] = "";
machine->mandatoryFeatures = tokenizeString<StringSet>(tokens[6], ",");
for (auto & f : machine->mandatoryFeatures)
machine->supportedFeatures.insert(f);
if (tokens[7] != "" && tokens[7] != "-")
machine->sshPublicHostKey = base64Decode(tokens[7]);

/* Re-use the State object of the previous machine with the
same name. */
auto i = oldMachines.find(machine->sshName);
if (i == oldMachines.end())
printMsg(lvlChatty, "adding new machine ‘%1%’", machine->sshName);
printMsg(lvlChatty, format("adding new machine ‘%1%’") % machine->sshName);
else
printMsg(lvlChatty, "updating machine ‘%1%’", machine->sshName);
printMsg(lvlChatty, format("updating machine ‘%1%’") % machine->sshName);
machine->state = i == oldMachines.end()
? std::make_shared<::Machine::State>()
? std::make_shared<Machine::State>()
: i->second->state;
newMachines[machine->sshName] = machine;
}
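Both variants parse the same eight-column machines format that plain Nix uses for remote builders (URI, system types, SSH key, max jobs, speed factor, supported features, mandatory features, SSH host key, with `-` as the empty marker). An illustrative line (hypothetical host and key path):

```
root@builder.example.org x86_64-linux /var/lib/hydra/id_ed25519 8 2 kvm,big-parallel benchmark -
```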
@@ -193,10 +180,10 @@ void State::parseMachines(const std::string & contents)
for (auto & m : oldMachines)
if (newMachines.find(m.first) == newMachines.end()) {
if (m.second->enabled)
printInfo("removing machine ‘%1%’", m.first);
/* Add a disabled ::Machine object to make sure stats are
printMsg(lvlInfo, format("removing machine ‘%1%’") % m.first);
/* Add a disabled Machine object to make sure stats are
maintained. */
auto machine = std::make_shared<::Machine>(*(m.second));
auto machine = std::make_shared<Machine>(*(m.second));
machine->enabled = false;
newMachines[m.first] = machine;
}
@@ -224,7 +211,7 @@ void State::monitorMachinesFile()
parseMachines("localhost " +
(settings.thisSystem == "x86_64-linux" ? "x86_64-linux,i686-linux" : settings.thisSystem.get())
+ " - " + std::to_string(settings.maxBuildJobs) + " 1 "
+ concatStringsSep(",", StoreConfig::getDefaultSystemFeatures()));
+ concatStringsSep(",", settings.systemFeatures.get()));
machinesReadyLock.unlock();
return;
}
@@ -331,13 +318,10 @@ unsigned int State::createBuildStep(pqxx::work & txn, time_t startTime, BuildID

if (r.affected_rows() == 0) goto restart;

for (auto & [name, output] : getDestStore()->queryPartialDerivationOutputMap(step->drvPath, &*localStore))
for (auto & [name, output] : step->drv->outputs)
txn.exec_params0
("insert into BuildStepOutputs (build, stepnr, name, path) values ($1, $2, $3, $4)",
buildId, stepNr, name,
output
? std::optional { localStore->printStorePath(*output)}
: std::nullopt);
buildId, stepNr, name, localStore->printStorePath(*output.path(*localStore, step->drv->name, name)));

if (status == bsBusy)
txn.exec(fmt("notify step_started, '%d\t%d'", buildId, stepNr));
@@ -374,23 +358,11 @@ void State::finishBuildStep(pqxx::work & txn, const RemoteResult & result,
assert(result.logFile.find('\t') == std::string::npos);
txn.exec(fmt("notify step_finished, '%d\t%d\t%s'",
buildId, stepNr, result.logFile));

if (result.stepStatus == bsSuccess) {
// Update the corresponding `BuildStepOutputs` row to add the output path
auto res = txn.exec_params1("select drvPath from BuildSteps where build = $1 and stepnr = $2", buildId, stepNr);
assert(res.size());
StorePath drvPath = localStore->parseStorePath(res[0].as<std::string>());
// If we've finished building, all the paths should be known
for (auto & [name, output] : getDestStore()->queryDerivationOutputMap(drvPath, &*localStore))
txn.exec_params0
("update BuildStepOutputs set path = $4 where build = $1 and stepnr = $2 and name = $3",
buildId, stepNr, name, localStore->printStorePath(output));
}
}


int State::createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
Build::ptr build, const StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const StorePath & storePath)
Build::ptr build, const StorePath & drvPath, const std::string & outputName, const StorePath & storePath)
{
restart:
auto stepNr = allocBuildStep(txn, build->id);
@@ -491,15 +463,6 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
res.releaseName != "" ? std::make_optional(res.releaseName) : std::nullopt,
isCachedBuild ? 1 : 0);

for (auto & [outputName, outputPath] : res.outputs) {
txn.exec_params0
("update BuildOutputs set path = $3 where build = $1 and name = $2",
build->id,
outputName,
localStore->printStorePath(outputPath)
);
}

txn.exec_params0("delete from BuildProducts where build = $1", build->id);

unsigned int productNr = 1;
@@ -511,7 +474,7 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
product.type,
product.subtype,
product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt,
product.sha256hash ? std::make_optional(product.sha256hash->to_string(HashFormat::Base16, false)) : std::nullopt,
product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base16, false)) : std::nullopt,
product.path,
product.name,
product.defaultPath);
@@ -579,168 +542,181 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()

void State::dumpStatus(Connection & conn)
{
time_t now = time(0);
json statusJson = {
{"status", "up"},
{"time", time(0)},
{"uptime", now - startedAt},
{"pid", getpid()},
std::ostringstream out;

{"nrQueuedBuilds", builds.lock()->size()},
{"nrActiveSteps", activeSteps_.lock()->size()},
{"nrStepsBuilding", nrStepsBuilding.load()},
{"nrStepsCopyingTo", nrStepsCopyingTo.load()},
{"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
{"nrStepsWaiting", nrStepsWaiting.load()},
{"nrUnsupportedSteps", nrUnsupportedSteps.load()},
{"bytesSent", bytesSent.load()},
{"bytesReceived", bytesReceived.load()},
{"nrBuildsRead", nrBuildsRead.load()},
{"buildReadTimeMs", buildReadTimeMs.load()},
{"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
{"nrBuildsDone", nrBuildsDone.load()},
{"nrStepsStarted", nrStepsStarted.load()},
{"nrStepsDone", nrStepsDone.load()},
{"nrRetries", nrRetries.load()},
{"maxNrRetries", maxNrRetries.load()},
{"nrQueueWakeups", nrQueueWakeups.load()},
{"nrDispatcherWakeups", nrDispatcherWakeups.load()},
{"dispatchTimeMs", dispatchTimeMs.load()},
{"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
{"nrDbConnections", dbPool.count()},
{"nrActiveDbUpdates", nrActiveDbUpdates.load()},
};
{
JSONObject root(out);
time_t now = time(0);
root.attr("status", "up");
root.attr("time", time(0));
root.attr("uptime", now - startedAt);
root.attr("pid", getpid());
{
auto builds_(builds.lock());
root.attr("nrQueuedBuilds", builds_->size());
}
{
auto steps_(steps.lock());
for (auto i = steps_->begin(); i != steps_->end(); )
if (i->second.lock()) ++i; else i = steps_->erase(i);
statusJson["nrUnfinishedSteps"] = steps_->size();
root.attr("nrUnfinishedSteps", steps_->size());
}
{
auto runnable_(runnable.lock());
for (auto i = runnable_->begin(); i != runnable_->end(); )
if (i->lock()) ++i; else i = runnable_->erase(i);
statusJson["nrRunnableSteps"] = runnable_->size();
root.attr("nrRunnableSteps", runnable_->size());
}
root.attr("nrActiveSteps", activeSteps_.lock()->size());
root.attr("nrStepsBuilding", nrStepsBuilding);
root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
root.attr("nrStepsWaiting", nrStepsWaiting);
root.attr("nrUnsupportedSteps", nrUnsupportedSteps);
root.attr("bytesSent", bytesSent);
root.attr("bytesReceived", bytesReceived);
root.attr("nrBuildsRead", nrBuildsRead);
root.attr("buildReadTimeMs", buildReadTimeMs);
root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead);
root.attr("nrBuildsDone", nrBuildsDone);
root.attr("nrStepsStarted", nrStepsStarted);
root.attr("nrStepsDone", nrStepsDone);
root.attr("nrRetries", nrRetries);
root.attr("maxNrRetries", maxNrRetries);
if (nrStepsDone) {
statusJson["totalStepTime"] = totalStepTime.load();
statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
root.attr("totalStepTime", totalStepTime);
root.attr("totalStepBuildTime", totalStepBuildTime);
root.attr("avgStepTime", (float) totalStepTime / nrStepsDone);
root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone);
}
root.attr("nrQueueWakeups", nrQueueWakeups);
root.attr("nrDispatcherWakeups", nrDispatcherWakeups);
root.attr("dispatchTimeMs", dispatchTimeMs);
root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups);
root.attr("nrDbConnections", dbPool.count());
root.attr("nrActiveDbUpdates", nrActiveDbUpdates);

{
auto nested = root.object("machines");
auto machines_(machines.lock());
for (auto & i : *machines_) {
auto & m(i.second);
auto & s(m->state);
auto info(m->state->connectInfo.lock());
auto nested2 = nested.object(m->sshName);
nested2.attr("enabled", m->enabled);

json machine = {
{"enabled", m->enabled},
{"systemTypes", m->systemTypes},
{"supportedFeatures", m->supportedFeatures},
{"mandatoryFeatures", m->mandatoryFeatures},
{"nrStepsDone", s->nrStepsDone.load()},
{"currentJobs", s->currentJobs.load()},
{"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)},
{"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)},
{"consecutiveFailures", info->consecutiveFailures},
};

if (s->currentJobs == 0)
machine["idleSince"] = s->idleSince.load();
if (m->state->nrStepsDone) {
machine["totalStepTime"] = s->totalStepTime.load();
machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
{
auto list = nested2.list("systemTypes");
for (auto & s : m->systemTypes)
list.elem(s);
}
statusJson["machines"][m->sshName] = machine;

{
auto list = nested2.list("supportedFeatures");
for (auto & s : m->supportedFeatures)
list.elem(s);
}

{
auto list = nested2.list("mandatoryFeatures");
for (auto & s : m->mandatoryFeatures)
list.elem(s);
}

nested2.attr("currentJobs", s->currentJobs);
if (s->currentJobs == 0)
nested2.attr("idleSince", s->idleSince);
nested2.attr("nrStepsDone", s->nrStepsDone);
if (m->state->nrStepsDone) {
nested2.attr("totalStepTime", s->totalStepTime);
nested2.attr("totalStepBuildTime", s->totalStepBuildTime);
nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone);
nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone);
}

auto info(m->state->connectInfo.lock());
nested2.attr("disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil));
nested2.attr("lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure));
nested2.attr("consecutiveFailures", info->consecutiveFailures);

}
}

{
auto jobsets_json = json::object();
auto nested = root.object("jobsets");
auto jobsets_(jobsets.lock());
for (auto & jobset : *jobsets_) {
jobsets_json[jobset.first.first + ":" + jobset.first.second] = {
{"shareUsed", jobset.second->shareUsed()},
{"seconds", jobset.second->getSeconds()},
};
auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second);
nested2.attr("shareUsed", jobset.second->shareUsed());
nested2.attr("seconds", jobset.second->getSeconds());
}
statusJson["jobsets"] = jobsets_json;
}

{
auto machineTypesJson = json::object();
auto nested = root.object("machineTypes");
auto machineTypes_(machineTypes.lock());
for (auto & i : *machineTypes_) {
auto machineTypeJson = machineTypesJson[i.first] = {
{"runnable", i.second.runnable},
{"running", i.second.running},
};
auto nested2 = nested.object(i.first);
nested2.attr("runnable", i.second.runnable);
nested2.attr("running", i.second.running);
if (i.second.runnable > 0)
machineTypeJson["waitTime"] = i.second.waitTime.count() +
i.second.runnable * (time(0) - lastDispatcherCheck);
nested2.attr("waitTime", i.second.waitTime.count() +
i.second.runnable * (time(0) - lastDispatcherCheck));
if (i.second.running == 0)
machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive));
}
statusJson["machineTypes"] = machineTypesJson;
}

auto store = getDestStore();

auto nested = root.object("store");

auto & stats = store->getStats();
statusJson["store"] = {
{"narInfoRead", stats.narInfoRead.load()},
{"narInfoReadAverted", stats.narInfoReadAverted.load()},
{"narInfoMissing", stats.narInfoMissing.load()},
{"narInfoWrite", stats.narInfoWrite.load()},
{"narInfoCacheSize", stats.pathInfoCacheSize.load()},
{"narRead", stats.narRead.load()},
{"narReadBytes", stats.narReadBytes.load()},
{"narReadCompressedBytes", stats.narReadCompressedBytes.load()},
{"narWrite", stats.narWrite.load()},
{"narWriteAverted", stats.narWriteAverted.load()},
{"narWriteBytes", stats.narWriteBytes.load()},
{"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()},
{"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
{"narCompressionSavings",
stats.narWriteBytes
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
: 0.0},
{"narCompressionSpeed", // MiB/s
nested.attr("narInfoRead", stats.narInfoRead);
nested.attr("narInfoReadAverted", stats.narInfoReadAverted);
nested.attr("narInfoMissing", stats.narInfoMissing);
nested.attr("narInfoWrite", stats.narInfoWrite);
nested.attr("narInfoCacheSize", stats.pathInfoCacheSize);
nested.attr("narRead", stats.narRead);
nested.attr("narReadBytes", stats.narReadBytes);
nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
nested.attr("narWrite", stats.narWrite);
nested.attr("narWriteAverted", stats.narWriteAverted);
nested.attr("narWriteBytes", stats.narWriteBytes);
nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
nested.attr("narCompressionSavings",
stats.narWriteBytes
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
: 0.0);
nested.attr("narCompressionSpeed", // MiB/s
stats.narWriteCompressionTimeMs
? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0},
};
: 0.0);

auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
if (s3Store) {
auto nested2 = nested.object("s3");
auto & s3Stats = s3Store->getS3Stats();
auto jsonS3 = statusJson["s3"] = {
{"put", s3Stats.put.load()},
{"putBytes", s3Stats.putBytes.load()},
{"putTimeMs", s3Stats.putTimeMs.load()},
{"putSpeed",
s3Stats.putTimeMs
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0},
{"get", s3Stats.get.load()},
{"getBytes", s3Stats.getBytes.load()},
{"getTimeMs", s3Stats.getTimeMs.load()},
{"getSpeed",
s3Stats.getTimeMs
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0},
{"head", s3Stats.head.load()},
{"costDollarApprox",
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+ s3Stats.put / 1000.0 * 0.005 +
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
};
nested2.attr("put", s3Stats.put);
nested2.attr("putBytes", s3Stats.putBytes);
nested2.attr("putTimeMs", s3Stats.putTimeMs);
nested2.attr("putSpeed",
s3Stats.putTimeMs
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0);
nested2.attr("get", s3Stats.get);
nested2.attr("getBytes", s3Stats.getBytes);
nested2.attr("getTimeMs", s3Stats.getTimeMs);
nested2.attr("getSpeed",
s3Stats.getTimeMs
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0);
nested2.attr("head", s3Stats.head);
nested2.attr("costDollarApprox",
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+ s3Stats.put / 1000.0 * 0.005 +
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
}
}
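Either way, the payload stored in SystemStatus ends up looking roughly like this (an abridged, made-up sample; the real fields are exactly the ones attached above):

```
{
  "status": "up",
  "uptime": 86400,
  "nrQueuedBuilds": 12,
  "nrStepsBuilding": 3,
  "machines": {
    "root@builder.example.org": { "enabled": true, "currentJobs": 2 }
  }
}
```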

@@ -749,7 +725,7 @@ void State::dumpStatus(Connection & conn)
pqxx::work txn(conn);
// FIXME: use PostgreSQL 9.5 upsert.
txn.exec("delete from SystemStatus where what = 'queue-runner'");
txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", statusJson.dump());
txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", out.str());
txn.exec("notify status_dumped");
txn.commit();
}
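The delete-then-insert pair is what the FIXME above is about; on PostgreSQL ≥ 9.5 it could be collapsed into a single upsert. A sketch, assuming `what` is the table's key column (as the delete suggests) and the value column is named `status`:

```cpp
// Hypothetical replacement for the delete + insert above.
txn.exec_params0(
    "insert into SystemStatus values ('queue-runner', $1) "
    "on conflict (what) do update set status = excluded.status",
    statusJson.dump());
```

(The argument would be `out.str()` on the older branch.)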
@@ -926,17 +902,10 @@ void State::run(BuildID buildOne)
while (true) {
try {
auto conn(dbPool.get());
try {
receiver dumpStatus_(*conn, "dump_status");
while (true) {
conn->await_notification();
dumpStatus(*conn);
}
} catch (pqxx::broken_connection & connEx) {
printMsg(lvlError, "main thread: %s", connEx.what());
printMsg(lvlError, "main thread: Reconnecting in 10s");
conn.markBad();
sleep(10);
receiver dumpStatus_(*conn, "dump_status");
while (true) {
conn->await_notification();
dumpStatus(*conn);
}
} catch (std::exception & e) {
printMsg(lvlError, "main thread: %s", e.what());
@@ -981,6 +950,7 @@ int main(int argc, char * * argv)
});

settings.verboseBuild = true;
settings.lockCPU = false;

State state{metricsAddrOpt};
if (status)

@@ -6,46 +6,7 @@

using namespace nix;


struct NarMemberConstructor : CreateRegularFileSink
{
NarMemberData & curMember;

HashSink hashSink = HashSink { HashAlgorithm::SHA256 };

std::optional<uint64_t> expectedSize;

NarMemberConstructor(NarMemberData & curMember)
: curMember(curMember)
{ }

void isExecutable() override
{
}

void preallocateContents(uint64_t size) override
{
expectedSize = size;
}

void operator () (std::string_view data) override
{
assert(expectedSize);
*curMember.fileSize += data.size();
hashSink(data);
if (curMember.contents) {
curMember.contents->append(data);
}
assert(curMember.fileSize <= expectedSize);
if (curMember.fileSize == expectedSize) {
auto [hash, len] = hashSink.finish();
assert(curMember.fileSize == len);
curMember.sha256 = hash;
}
}
};

struct Extractor : FileSystemObjectSink
struct Extractor : ParseSink
{
std::unordered_set<Path> filesToKeep {
"/nix-support/hydra-build-products",
@@ -54,6 +15,7 @@ struct Extractor : FileSystemObjectSink
};

NarMemberDatas & members;
NarMemberData * curMember = nullptr;
Path prefix;

Extractor(NarMemberDatas & members, const Path & prefix)
@@ -62,24 +24,49 @@ struct Extractor : FileSystemObjectSink

void createDirectory(const Path & path) override
{
members.insert_or_assign(prefix + path, NarMemberData { .type = SourceAccessor::Type::tDirectory });
members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tDirectory });
}

void createRegularFile(const Path & path, std::function<void(CreateRegularFileSink &)> func) override
void createRegularFile(const Path & path) override
{
NarMemberConstructor nmc {
members.insert_or_assign(prefix + path, NarMemberData {
.type = SourceAccessor::Type::tRegular,
.fileSize = 0,
.contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
}).first->second,
};
func(nmc);
curMember = &members.insert_or_assign(prefix + path, NarMemberData {
.type = FSAccessor::Type::tRegular,
.fileSize = 0,
.contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
}).first->second;
}

std::optional<uint64_t> expectedSize;
std::unique_ptr<HashSink> hashSink;

void preallocateContents(uint64_t size) override
{
expectedSize = size;
hashSink = std::make_unique<HashSink>(htSHA256);
}

void receiveContents(std::string_view data) override
{
assert(expectedSize);
assert(curMember);
assert(hashSink);
*curMember->fileSize += data.size();
(*hashSink)(data);
if (curMember->contents) {
curMember->contents->append(data);
}
assert(curMember->fileSize <= expectedSize);
if (curMember->fileSize == expectedSize) {
auto [hash, len] = hashSink->finish();
assert(curMember->fileSize == len);
curMember->sha256 = hash;
hashSink.reset();
}
}

void createSymlink(const Path & path, const std::string & target) override
{
members.insert_or_assign(prefix + path, NarMemberData { .type = SourceAccessor::Type::tSymlink });
members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tSymlink });
}
};
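Either version of this sink is driven by nix's NAR parser. A minimal sketch of the `extractNarData` entry point used earlier in `buildRemote` (assuming the `parseDump(sink, source)` API, which replays a NAR stream into a sink):

```cpp
// Sketch only: the real extractNarData lives next to the Extractor above.
void extractNarDataSketch(nix::Source & narSource,
    const nix::Path & prefix,
    NarMemberDatas & members)
{
    Extractor sink(members, prefix);
    nix::parseDump(sink, narSource); // fires the create*/contents callbacks above
}
```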
|
||||
|
||||
|
||||
@@ -1,13 +1,13 @@
#pragma once

#include "source-accessor.hh"
#include "fs-accessor.hh"
#include "types.hh"
#include "serialise.hh"
#include "hash.hh"

struct NarMemberData
{
    nix::SourceAccessor::Type type;
    nix::FSAccessor::Type type;
    std::optional<uint64_t> fileSize;
    std::optional<std::string> contents;
    std::optional<nix::Hash> sha256;

@@ -10,30 +10,26 @@ using namespace nix;
void State::queueMonitor()
{
    while (true) {
        auto conn(dbPool.get());
        try {
            queueMonitorLoop(*conn);
        } catch (pqxx::broken_connection & e) {
            printMsg(lvlError, "queue monitor: %s", e.what());
            printMsg(lvlError, "queue monitor: Reconnecting in 10s");
            conn.markBad();
            sleep(10);
            queueMonitorLoop();
        } catch (std::exception & e) {
            printError("queue monitor: %s", e.what());
            printMsg(lvlError, format("queue monitor: %1%") % e.what());
            sleep(10); // probably a DB problem, so don't retry right away
        }
    }
}


void State::queueMonitorLoop(Connection & conn)
void State::queueMonitorLoop()
{
    receiver buildsAdded(conn, "builds_added");
    receiver buildsRestarted(conn, "builds_restarted");
    receiver buildsCancelled(conn, "builds_cancelled");
    receiver buildsDeleted(conn, "builds_deleted");
    receiver buildsBumped(conn, "builds_bumped");
    receiver jobsetSharesChanged(conn, "jobset_shares_changed");
    auto conn(dbPool.get());

    receiver buildsAdded(*conn, "builds_added");
    receiver buildsRestarted(*conn, "builds_restarted");
    receiver buildsCancelled(*conn, "builds_cancelled");
    receiver buildsDeleted(*conn, "builds_deleted");
    receiver buildsBumped(*conn, "builds_bumped");
    receiver jobsetSharesChanged(*conn, "jobset_shares_changed");

    auto destStore = getDestStore();

@@ -43,17 +39,17 @@ void State::queueMonitorLoop(Connection & conn)
    while (!quit) {
        localStore->clearPathInfoCache();

        bool done = getQueuedBuilds(conn, destStore, lastBuildId);
        bool done = getQueuedBuilds(*conn, destStore, lastBuildId);

        if (buildOne && buildOneDone) quit = true;

        /* Sleep until we get notification from the database about an
           event. */
        if (done && !quit) {
            conn.await_notification();
            conn->await_notification();
            nrQueueWakeups++;
        } else
            conn.get_notifs();
            conn->get_notifs();

        if (auto lowestId = buildsAdded.get()) {
            lastBuildId = std::min(lastBuildId, static_cast<unsigned>(std::stoul(*lowestId) - 1));
@@ -65,11 +61,11 @@ void State::queueMonitorLoop(Connection & conn)
        }
        if (buildsCancelled.get() || buildsDeleted.get() || buildsBumped.get()) {
            printMsg(lvlTalkative, "got notification: builds cancelled or bumped");
            processQueueChange(conn);
            processQueueChange(*conn);
        }
        if (jobsetSharesChanged.get()) {
            printMsg(lvlTalkative, "got notification: jobset shares changed");
            processJobsetSharesChange(conn);
            processJobsetSharesChange(*conn);
        }
    }

@@ -146,13 +142,13 @@ bool State::getQueuedBuilds(Connection & conn,

    createBuild = [&](Build::ptr build) {
        prom.queue_build_loads.Increment();
        printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
        printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
        nrAdded++;
        newBuildsByID.erase(build->id);

        if (!localStore->isValidPath(build->drvPath)) {
            /* Derivation has been GC'ed prematurely. */
            printError("aborting GC'ed build %1%", build->id);
            printMsg(lvlError, format("aborting GC'ed build %1%") % build->id);
            if (!build->finishedInDB) {
                auto mc = startDbUpdate();
                pqxx::work txn(conn);
@@ -196,19 +192,15 @@ bool State::getQueuedBuilds(Connection & conn,
        if (!res[0].is_null()) propagatedFrom = res[0].as<BuildID>();

        if (!propagatedFrom) {
            for (auto & [outputName, optOutputPath] : destStore->queryPartialDerivationOutputMap(ex.step->drvPath, &*localStore)) {
                constexpr std::string_view common = "select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where startTime != 0 and stopTime != 0 and status = 1";
                auto res = optOutputPath
                    ? txn.exec_params(
                        std::string { common } + " and path = $1",
                        localStore->printStorePath(*optOutputPath))
                    : txn.exec_params(
                        std::string { common } + " and drvPath = $1 and name = $2",
                        localStore->printStorePath(ex.step->drvPath),
                        outputName);
                if (!res[0][0].is_null()) {
                    propagatedFrom = res[0][0].as<BuildID>();
                    break;
            for (auto & i : ex.step->drv->outputsAndOptPaths(*localStore)) {
                if (i.second.second) {
                    auto res = txn.exec_params
                        ("select max(s.build) from BuildSteps s join BuildStepOutputs o on s.build = o.build where path = $1 and startTime != 0 and stopTime != 0 and status = 1",
                        localStore->printStorePath(*i.second.second));
                    if (!res[0][0].is_null()) {
                        propagatedFrom = res[0][0].as<BuildID>();
                        break;
                    }
                }
            }
        }
@@ -244,10 +236,12 @@ bool State::getQueuedBuilds(Connection & conn,
        /* If we didn't get a step, it means the step's outputs are
           all valid. So we mark this as a finished, cached build. */
        if (!step) {
            BuildOutput res = getBuildOutputCached(conn, destStore, build->drvPath);
            auto drv = localStore->readDerivation(build->drvPath);
            BuildOutput res = getBuildOutputCached(conn, destStore, drv);

            for (auto & i : destStore->queryDerivationOutputMap(build->drvPath, &*localStore))
                addRoot(i.second);
            for (auto & i : drv.outputsAndOptPaths(*localStore))
                if (i.second.second)
                    addRoot(*i.second.second);

            {
                auto mc = startDbUpdate();
@@ -308,7 +302,7 @@ bool State::getQueuedBuilds(Connection & conn,

    /* Add the new runnable build steps to ‘runnable’ and wake up
       the builder threads. */
    printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
    printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded);
    for (auto & r : newRunnable)
        makeRunnable(r);

@@ -321,7 +315,7 @@ bool State::getQueuedBuilds(Connection & conn,
            if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) {
                prom.queue_checks_early_exits.Increment();
                break;
            }
        }
    }

    prom.queue_checks_finished.Increment();
@@ -364,13 +358,13 @@ void State::processQueueChange(Connection & conn)
    for (auto i = builds_->begin(); i != builds_->end(); ) {
        auto b = currentIds.find(i->first);
        if (b == currentIds.end()) {
            printInfo("discarding cancelled build %1%", i->first);
            printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first);
            i = builds_->erase(i);
            // FIXME: ideally we would interrupt active build steps here.
            continue;
        }
        if (i->second->globalPriority < b->second) {
            printInfo("priority of build %1% increased", i->first);
            printMsg(lvlInfo, format("priority of build %1% increased") % i->first);
            i->second->globalPriority = b->second;
            i->second->propagatePriorities();
        }
@@ -470,7 +464,10 @@ Step::ptr State::createStep(ref<Store> destStore,

    step->systemType = step->drv->platform;
    {
        StringSet features = step->requiredSystemFeatures = step->parsedDrv->getRequiredSystemFeatures();
        auto i = step->drv->env.find("requiredSystemFeatures");
        StringSet features;
        if (i != step->drv->env.end())
            features = step->requiredSystemFeatures = tokenizeString<std::set<std::string>>(i->second);
        if (step->preferLocalBuild)
            features.insert("local");
        if (!features.empty()) {
@@ -484,41 +481,26 @@ Step::ptr State::createStep(ref<Store> destStore,
        throw PreviousFailure{step};

    /* Are all outputs valid? */
    auto outputHashes = staticOutputHashes(*localStore, *(step->drv));
    bool valid = true;
    std::map<DrvOutput, std::optional<StorePath>> missing;
    for (auto & [outputName, maybeOutputPath] : destStore->queryPartialDerivationOutputMap(drvPath, &*localStore)) {
        auto outputHash = outputHashes.at(outputName);
        if (maybeOutputPath && destStore->isValidPath(*maybeOutputPath))
            continue;
        valid = false;
        missing.insert({{outputHash, outputName}, maybeOutputPath});
    }
    DerivationOutputs missing;
    for (auto & i : step->drv->outputs)
        if (!destStore->isValidPath(*i.second.path(*localStore, step->drv->name, i.first))) {
            valid = false;
            missing.insert_or_assign(i.first, i.second);
        }

    /* Try to copy the missing paths from the local store or from
       substitutes. */
    if (!missing.empty()) {

        size_t avail = 0;
        for (auto & [i, pathOpt] : missing) {
            // If we don't know the output path from the destination
            // store, see if the local store can tell us.
            if (/* localStore != destStore && */ !pathOpt && experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
                if (auto maybeRealisation = localStore->queryRealisation(i))
                    pathOpt = maybeRealisation->outPath;

            if (!pathOpt) {
                // No hope of getting the store object if we don't know
                // the path.
                continue;
            }
            auto & path = *pathOpt;

            if (/* localStore != destStore && */ localStore->isValidPath(path))
        for (auto & i : missing) {
            auto path = i.second.path(*localStore, step->drv->name, i.first);
            if (/* localStore != destStore && */ localStore->isValidPath(*path))
                avail++;
            else if (useSubstitutes) {
                SubstitutablePathInfos infos;
                localStore->querySubstitutablePathInfos({{path, {}}}, infos);
                localStore->querySubstitutablePathInfos({{*path, {}}}, infos);
                if (infos.size() == 1)
                    avail++;
            }
@@ -526,29 +508,26 @@ Step::ptr State::createStep(ref<Store> destStore,

        if (missing.size() == avail) {
            valid = true;
            for (auto & [i, pathOpt] : missing) {
                // If we found everything, then we should know the path
                // to every missing store object now.
                assert(pathOpt);
                auto & path = *pathOpt;
            for (auto & i : missing) {
                auto path = i.second.path(*localStore, step->drv->name, i.first);

                try {
                    time_t startTime = time(0);

                    if (localStore->isValidPath(path))
                    if (localStore->isValidPath(*path))
                        printInfo("copying output ‘%1%’ of ‘%2%’ from local store",
                            localStore->printStorePath(path),
                            localStore->printStorePath(*path),
                            localStore->printStorePath(drvPath));
                    else {
                        printInfo("substituting output ‘%1%’ of ‘%2%’",
                            localStore->printStorePath(path),
                            localStore->printStorePath(*path),
                            localStore->printStorePath(drvPath));
                        localStore->ensurePath(path);
                        localStore->ensurePath(*path);
                        // FIXME: should copy directly from substituter to destStore.
                    }

                    copyClosure(*localStore, *destStore,
                        StorePathSet { path },
                        StorePathSet { *path },
                        NoRepair, CheckSigs, NoSubstitute);

                    time_t stopTime = time(0);
@@ -556,13 +535,13 @@ Step::ptr State::createStep(ref<Store> destStore,
                    {
                        auto mc = startDbUpdate();
                        pqxx::work txn(conn);
                        createSubstitutionStep(txn, startTime, stopTime, build, drvPath, *(step->drv), "out", path);
                        createSubstitutionStep(txn, startTime, stopTime, build, drvPath, "out", *path);
                        txn.commit();
                    }

                } catch (Error & e) {
                    printError("while copying/substituting output ‘%s’ of ‘%s’: %s",
                        localStore->printStorePath(path),
                        localStore->printStorePath(*path),
                        localStore->printStorePath(drvPath),
                        e.what());
                    valid = false;
@@ -582,7 +561,7 @@ Step::ptr State::createStep(ref<Store> destStore,
    printMsg(lvlDebug, "creating build step ‘%1%’", localStore->printStorePath(drvPath));

    /* Create steps for the dependencies. */
    for (auto & i : step->drv->inputDrvs.map) {
    for (auto & i : step->drv->inputDrvs) {
        auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
        if (dep) {
            auto step_(step->state.lock());
@@ -661,23 +640,21 @@ void State::processJobsetSharesChange(Connection & conn)
}


BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::StorePath & drvPath)
BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore, const nix::Derivation & drv)
{
    auto derivationOutputs = destStore->queryDerivationOutputMap(drvPath, &*localStore);

    {
        pqxx::work txn(conn);

        for (auto & [name, output] : derivationOutputs) {
        for (auto & [name, output] : drv.outputsAndOptPaths(*localStore)) {
            auto r = txn.exec_params
                ("select id, buildStatus, releaseName, closureSize, size from Builds b "
                 "join BuildOutputs o on b.id = o.build "
                 "where finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1",
                 localStore->printStorePath(output));
                 localStore->printStorePath(*output.second));
            if (r.empty()) continue;
            BuildID id = r[0][0].as<BuildID>();

            printInfo("reusing build %d", id);
            printMsg(lvlInfo, format("reusing build %d") % id);

            BuildOutput res;
            res.failed = r[0][1].as<int>() == bsFailedWithOutput;
@@ -700,7 +677,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
            product.fileSize = row[2].as<off_t>();
        }
        if (!row[3].is_null())
            product.sha256hash = Hash::parseAny(row[3].as<std::string>(), HashAlgorithm::SHA256);
            product.sha256hash = Hash::parseAny(row[3].as<std::string>(), htSHA256);
        if (!row[4].is_null())
            product.path = row[4].as<std::string>();
        product.name = row[5].as<std::string>();
@@ -727,5 +704,5 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
    }

    NarMemberDatas narMembers;
    return getBuildOutput(destStore, narMembers, derivationOutputs);
    return getBuildOutput(destStore, narMembers, drv);
}

@@ -21,9 +21,6 @@
#include "store-api.hh"
#include "sync.hh"
#include "nar-extractor.hh"
#include "serve-protocol.hh"
#include "serve-protocol-impl.hh"
#include "machines.hh"


typedef unsigned int BuildID;
@@ -81,8 +78,6 @@ struct RemoteResult
    {
        return stepStatus == bsCachedFailure ? bsFailed : stepStatus;
    }

    void updateWithBuildResult(const nix::BuildResult &);
};


@@ -236,13 +231,17 @@ void getDependents(Step::ptr step, std::set<Build::ptr> & builds, std::set<Step:
void visitDependencies(std::function<void(Step::ptr)> visitor, Step::ptr step);


struct Machine : nix::Machine
struct Machine
{
    typedef std::shared_ptr<Machine> ptr;

    /* TODO Get rid of: `nix::Machine::storeUri` is normalized in a way
       we are not yet used to, but once we are, we don't need this. */
    std::string sshName;
    bool enabled{true};

    std::string sshName, sshKey;
    std::set<std::string> systemTypes, supportedFeatures, mandatoryFeatures;
    unsigned int maxJobs = 1;
    float speedFactor = 1.0;
    std::string sshPublicHostKey;

    struct State {
        typedef std::shared_ptr<State> ptr;
@@ -298,12 +297,6 @@ struct Machine : nix::Machine
        std::regex r("^(ssh://|ssh-ng://)?localhost$");
        return std::regex_search(sshName, r);
    }

    // A connection to a machine
    struct Connection : nix::ServeProto::BasicClientConnection {
        // Backpointer to the machine
        ptr machine;
    };
};


@@ -437,7 +430,7 @@ private:

    /* How often the build steps of a jobset should be repeated in
       order to detect non-determinism. */
    std::map<std::pair<std::string, std::string>, size_t> jobsetRepeats;
    std::map<std::pair<std::string, std::string>, unsigned int> jobsetRepeats;

    bool uploadLogsToBinaryCache;

@@ -492,13 +485,13 @@ private:
        const std::string & machine);

    int createSubstitutionStep(pqxx::work & txn, time_t startTime, time_t stopTime,
        Build::ptr build, const nix::StorePath & drvPath, const nix::Derivation drv, const std::string & outputName, const nix::StorePath & storePath);
        Build::ptr build, const nix::StorePath & drvPath, const std::string & outputName, const nix::StorePath & storePath);

    void updateBuild(pqxx::work & txn, Build::ptr build, BuildStatus status);

    void queueMonitor();

    void queueMonitorLoop(Connection & conn);
    void queueMonitorLoop();

    /* Check the queue for new builds. */
    bool getQueuedBuilds(Connection & conn,
@@ -508,7 +501,7 @@ private:
    void processQueueChange(Connection & conn);

    BuildOutput getBuildOutputCached(Connection & conn, nix::ref<nix::Store> destStore,
        const nix::StorePath & drvPath);
        const nix::Derivation & drv);

    Step::ptr createStep(nix::ref<nix::Store> store,
        Connection & conn, Build::ptr build, const nix::StorePath & drvPath,
@@ -550,7 +543,8 @@ private:

    void buildRemote(nix::ref<nix::Store> destStore,
        Machine::ptr machine, Step::ptr step,
        const nix::ServeProto::BuildOptions & buildOptions,
        unsigned int maxSilentTime, unsigned int buildTimeout,
        unsigned int repeats,
        RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
        std::function<void(StepState)> updateStep,
        NarMemberDatas & narMembers);

@@ -15,7 +15,6 @@ use Nix::Config;
use List::SomeUtils qw(all);
use Encode;
use JSON::PP;
use WWW::Form::UrlEncoded::PP qw();

use feature 'state';

@@ -79,16 +78,14 @@ sub build_GET {

    $c->stash->{template} = 'build.tt';
    $c->stash->{isLocalStore} = isLocalStore();
    # XXX: If the derivation is content-addressed then this will always return
    # false because `$_->path` will be empty
    $c->stash->{available} =
        $c->stash->{isLocalStore}
        ? all { $_->path && isValidPath($_->path) } $build->buildoutputs->all
        ? all { isValidPath($_->path) } $build->buildoutputs->all
        : 1;
    $c->stash->{drvAvailable} = isValidPath $build->drvpath;

    if ($build->finished && $build->iscachedbuild) {
        my $path = ($build->buildoutputs)[0]->path or undef;
        my $path = ($build->buildoutputs)[0]->path or die;
        my $cachedBuildStep = findBuildStepByOutPath($self, $c, $path);
        if (defined $cachedBuildStep) {
            $c->stash->{cachedBuild} = $cachedBuildStep->build;
@@ -142,7 +139,7 @@ sub view_nixlog : Chained('buildChain') PathPart('nixlog') {
    $c->stash->{step} = $step;

    my $drvPath = $step->drvpath;
    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
    showLog($c, $mode, $log_uri);
}

@@ -151,7 +148,7 @@ sub view_log : Chained('buildChain') PathPart('log') {
    my ($self, $c, $mode) = @_;

    my $drvPath = $c->stash->{build}->drvpath;
    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath))]);
    my $log_uri = $c->uri_for($c->controller('Root')->action_for("log"), [basename($drvPath)]);
    showLog($c, $mode, $log_uri);
}

@@ -241,17 +238,9 @@ sub serveFile {
        "store", "cat", "--store", getStoreUri(), "$path"]) };

    # Detect MIME type.
    my $type = "text/plain";
    if ($path =~ /.*\.(\S{1,})$/xms) {
        my $ext = $1;
        my $mimeTypes = MIME::Types->new(only_complete => 1);
        my $t = $mimeTypes->mimeTypeOf($ext);
        $type = ref $t ? $t->type : $t if $t;
    } else {
        state $magic = File::LibMagic->new(follow_symlinks => 1);
        my $info = $magic->info_from_filename($path);
        $type = $info->{mime_with_encoding};
    }
    state $magic = File::LibMagic->new(follow_symlinks => 1);
    my $info = $magic->info_from_filename($path);
    my $type = $info->{mime_with_encoding};
    $c->response->content_type($type);
    $c->forward('Hydra::View::Plain');
}

@@ -16,11 +16,8 @@ use List::Util qw[min max];
use List::SomeUtils qw{any};
use Net::Prometheus;
use Types::Standard qw/StrMatch/;
use WWW::Form::UrlEncoded::PP qw();

use constant NARINFO_REGEX => qr{^([a-z0-9]{32})\.narinfo$};
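# e.g.: p9f4ml6rk0z4bijxvxmb0cach5c19sdi.narinfo (illustrative example of the pattern above; the hash is made up, not from the source)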
# e.g.: https://hydra.example.com/realisations/sha256:a62128132508a3a32eef651d6467695944763602f226ac630543e947d9feb140!out.doi
use constant REALISATIONS_REGEX => qr{^(sha256:[a-z0-9]{64}![a-z]+)\.doi$};

# Put this controller at top-level.
__PACKAGE__->config->{namespace} = '';
@@ -358,33 +355,6 @@ sub nix_cache_info :Path('nix-cache-info') :Args(0) {
}


sub realisations :Path('realisations') :Args(StrMatch[REALISATIONS_REGEX]) {
    my ($self, $c, $realisation) = @_;

    if (!isLocalStore) {
        notFound($c, "There is no binary cache here.");
    }

    else {
        my ($rawDrvOutput) = $realisation =~ REALISATIONS_REGEX;
        my $rawRealisation = queryRawRealisation($rawDrvOutput);

        if (!$rawRealisation) {
            $c->response->status(404);
            $c->response->content_type('text/plain');
            $c->stash->{plain}->{data} = "does not exist\n";
            $c->forward('Hydra::View::Plain');
            setCacheHeaders($c, 60 * 60);
            return;
        }

        $c->response->content_type('text/plain');
        $c->stash->{plain}->{data} = $rawRealisation;
        $c->forward('Hydra::View::Plain');
    }
}


sub narinfo :Path :Args(StrMatch[NARINFO_REGEX]) {
    my ($self, $c, $narinfo) = @_;

@@ -554,7 +524,7 @@ sub log :Local :Args(1) {
    my $logPrefix = $c->config->{log_prefix};

    if (defined $logPrefix) {
        $c->res->redirect($logPrefix . "log/" . WWW::Form::UrlEncoded::PP::url_encode(basename($drvPath)));
        $c->res->redirect($logPrefix . "log/" . basename($drvPath));
    } else {
        notFound($c, "The build log of $drvPath is not available.");
    }

@@ -463,7 +463,7 @@ sub my_jobs_tab :Chained('dashboard_base') :PathPart('my-jobs-tab') :Args(0) {
        , "jobset.enabled" => 1
        },
        { order_by => ["project", "jobset", "job"]
        , join => {"jobset" => "project"}
        , join => ["project", "jobset"]
        })];
}


@@ -88,6 +88,10 @@ sub buildQueued {
    common(@_, [], 0);
}

sub buildStarted {
    common(@_, [], 1);
}

sub buildFinished {
    common(@_, 2);
}

@@ -49,7 +49,7 @@ __PACKAGE__->table("buildoutputs");
=head2 path

  data_type: 'text'
  is_nullable: 1
  is_nullable: 0

=cut

@@ -59,7 +59,7 @@ __PACKAGE__->add_columns(
  "name",
  { data_type => "text", is_nullable => 0 },
  "path",
  { data_type => "text", is_nullable => 1 },
  { data_type => "text", is_nullable => 0 },
);

=head1 PRIMARY KEY

@@ -94,8 +94,8 @@ __PACKAGE__->belongs_to(
);


# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Jsabm3YTcI7YvCuNdKP5Ng
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gU+kZ6A0ISKpaXGRGve8mg

my %hint = (
    columns => [

@@ -55,7 +55,7 @@ __PACKAGE__->table("buildstepoutputs");
=head2 path

  data_type: 'text'
  is_nullable: 1
  is_nullable: 0

=cut

@@ -67,7 +67,7 @@ __PACKAGE__->add_columns(
  "name",
  { data_type => "text", is_nullable => 0 },
  "path",
  { data_type => "text", is_nullable => 1 },
  { data_type => "text", is_nullable => 0 },
);

=head1 PRIMARY KEY

@@ -119,8 +119,8 @@ __PACKAGE__->belongs_to(
);


# Created by DBIx::Class::Schema::Loader v0.07049 @ 2022-06-30 12:02:32
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:Bad70CRTt7zb2GGuRoQ++Q
# Created by DBIx::Class::Schema::Loader v0.07049 @ 2021-08-26 12:02:36
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:gxp8rOjpRVen4YbIjomHTw


# You can replace this text with custom code or comments, and it will be preserved on regeneration

@@ -216,7 +216,7 @@ sub json_hint {

sub _authenticator() {
    my $authenticator = Crypt::Passphrase->new(
        encoder => { module => 'Argon2', output_size => 16 },
        encoder => 'Argon2',
        validators => [
            (sub {
                my ($password, $hash) = @_;

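The hunk above only shows the encoder configuration changing (an explicit Argon2 encoder with `output_size => 16` on one side, the bare `'Argon2'` default on the other). For context, a minimal sketch of how a `Crypt::Passphrase` authenticator like this is typically used; the password literal is made up and this is not Hydra's actual call site:

```perl
use Crypt::Passphrase;

my $authenticator = Crypt::Passphrase->new(encoder => 'Argon2');

# Encode a new password for storage...
my $hash = $authenticator->hash_password('s3cret');

# ...and later check a login attempt against the stored hash.
my $ok = $authenticator->verify_password('s3cret', $hash);
```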
@@ -2,7 +2,6 @@

#include <pqxx/pqxx>

#include "environment-variables.hh"
#include "util.hh"



@@ -2,7 +2,6 @@

#include <map>

#include "file-system.hh"
#include "util.hh"

struct HydraConfig

@@ -33,7 +33,7 @@
<div id="hydra-signin" class="modal hide fade" tabindex="-1" role="dialog" aria-hidden="true">
  <div class="modal-dialog" role="document">
    <div class="modal-content">
      <form id="signin-form">
      <form>
        <div class="modal-body">
          <div class="form-group">
            <label for="username" class="col-form-label">User name</label>
@@ -45,7 +45,7 @@
          </div>
        </div>
        <div class="modal-footer">
          <button type="submit" class="btn btn-primary">Sign in</button>
          <button id="do-signin" type="button" class="btn btn-primary">Sign in</button>
          <button type="button" class="btn btn-secondary" data-dismiss="modal">Cancel</button>
        </div>
      </form>
@@ -57,11 +57,10 @@

    function finishSignOut() { }

    $("#signin-form").submit(function(e) {
      e.preventDefault();
    $("#do-signin").click(function() {
      requestJSON({
        url: "[% c.uri_for('/login') %]",
        data: $(this).serialize(),
        data: $(this).parents("form").serialize(),
        type: 'POST',
        success: function(data) {
          window.location.reload();
@@ -83,7 +82,7 @@
    function onGoogleSignIn(googleUser) {
      requestJSON({
        url: "[% c.uri_for('/google-login') %]",
        data: "id_token=" + googleUser.credential,
        data: "id_token=" + googleUser.getAuthResponse().id_token,
        type: 'POST',
        success: function(data) {
          window.location.reload();
@@ -92,6 +91,9 @@
      return false;
    };

    $("#google-signin").click(function() {
      $(".g-signin2:first-child > div").click();
    });
  </script>
[% END %]


@@ -374,7 +374,7 @@ BLOCK renderInputDiff; %]
      [% ELSIF bi1.uri == bi2.uri && bi1.revision != bi2.revision %]
        [% IF bi1.type == "git" %]
          <tr><td>
            <b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 8) _ ' to ' _ bi2.revision.substr(0, 8)) %]</tt>
            <b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 6) _ ' to ' _ bi2.revision.substr(0, 6)) %]</tt>
          </td></tr>
        [% ELSE %]
          <tr><td>

@@ -133,10 +133,8 @@
  [% ELSE %]
    [% WRAPPER makeSubMenu title="Sign in" id="sign-in-menu" align="right" %]
      [% IF c.config.enable_google_login %]
        <script src="https://accounts.google.com/gsi/client" async defer></script>
        <div id="g_id_onload" data-client_id="[% c.config.google_client_id %]" data-auto_prompt="false" data-callback="onGoogleSignIn">
        </div>
        <div class="g_id_signin" data-type="standard"></div>
        <div style="display: none" class="g-signin2" data-onsuccess="onGoogleSignIn" data-theme="dark"></div>
        <a class="dropdown-item" href="#" id="google-signin">Sign in with Google</a>
        <div class="dropdown-divider"></div>
      [% END %]
      [% IF c.config.github_client_id %]

@@ -438,17 +438,13 @@ sub checkBuild {
        # new build to be scheduled if the meta.maintainers field is
        # changed?
        if (defined $prevEval) {
            my $pathOrDrvConstraint = defined $firstOutputPath
                ? { path => $firstOutputPath }
                : { drvPath => $drvPath };

            my ($prevBuild) = $prevEval->builds->search(
                # The "project" and "jobset" constraints are
                # semantically unnecessary (because they're implied by
                # the eval), but they give a factor 1000 speedup on
                # the Nixpkgs jobset with PostgreSQL.
                { jobset_id => $jobset->get_column('id'), job => $jobName,
                  name => $firstOutputName, %$pathOrDrvConstraint },
                  name => $firstOutputName, path => $firstOutputPath },
                { rows => 1, columns => ['id', 'finished'], join => ['buildoutputs'] });
            if (defined $prevBuild) {
                #print STDERR "  already scheduled/built as build ", $prevBuild->id, "\n";

@@ -247,7 +247,7 @@ create trigger BuildBumped after update on Builds for each row
create table BuildOutputs (
    build integer not null,
    name text not null,
    path text,
    path text not null,
    primary key (build, name),
    foreign key (build) references Builds(id) on delete cascade
);
@@ -303,7 +303,7 @@ create table BuildStepOutputs (
    build integer not null,
    stepnr integer not null,
    name text not null,
    path text,
    path text not null,
    primary key (build, stepnr, name),
    foreign key (build) references Builds(id) on delete cascade,
    foreign key (build, stepnr) references BuildSteps(build, stepnr) on delete cascade

@@ -1,3 +0,0 @@
-- This index was introduced in a migration but was never recorded in
-- hydra.sql (the source of truth), which is why `if exists` is required.
drop index if exists IndexBuildOutputsOnPath;
@@ -1,4 +0,0 @@
-- CA derivations do not have statically known output paths. The values
-- are only filled in after the build runs.
ALTER TABLE BuildStepOutputs ALTER COLUMN path DROP NOT NULL;
ALTER TABLE BuildOutputs ALTER COLUMN path DROP NOT NULL;
@@ -1,30 +0,0 @@
use strict;
use warnings;
use Setup;
my $ctx = test_context();
use HTTP::Request::Common;
use Test2::V0;
use Catalyst::Test ();
Catalyst::Test->import('Hydra');
require Hydra::Schema;
require Hydra::Model::DB;
my $db = $ctx->db();
my $user = $db->resultset('Users')->create({ username => 'alice', emailaddress => 'alice@invalid.org', password => '!' });
$user->setPassword('foobar');
my $builds = $ctx->makeAndEvaluateJobset(
    expression => "basic.nix",
    build => 1
);
my $login = request(POST '/login', Referer => 'http://localhost', Content => {
    username => 'alice',
    password => 'foobar',
});
is($login->code, 302);
my $cookie = $login->header("set-cookie");
my $my_jobs = request(GET '/dashboard/alice/my-jobs-tab', Accept => 'application/json', Cookie => $cookie);
ok($my_jobs->is_success);
my $content = $my_jobs->content();
ok($content =~ /empty_dir/);
ok(!($content =~ /fails/));
ok(!($content =~ /succeed_with_failed/));
done_testing;
@@ -57,8 +57,8 @@ subtest "Validate a run log was created" => sub {
    ok($runlog->did_succeed(), "The process did succeed.");
    is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*");
    is($runlog->command, 'cp "$HYDRA_JSON" "$HYDRA_DATA/joboutput.json"', "The executed command is saved.");
    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 5), "The end time is also recent.");
    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 2), "The end time is also recent.");
    is($runlog->exit_code, 0, "This command should have succeeded.");

    subtest "Validate the run log file exists" => sub {

@@ -43,8 +43,8 @@ subtest "Validate a run log was created" => sub {
    ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error.");
    is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*");
    is($runlog->command, 'invalid-command-this-does-not-exist', "The executed command is saved.");
    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 5), "The end time is also recent.");
    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 2), "The end time is also recent.");
    is($runlog->exit_code, undef, "This command should not have executed.");
    is($runlog->error_number, 2, "This command failed to exec.");
};

@@ -55,7 +55,7 @@ subtest "Starting a process" => sub {
    ok($runlog->is_running(), "The process is running.");
    ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
    ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
    is($runlog->end_time, undef, "The end time is undefined.");
    is($runlog->exit_code, undef, "The exit code is undefined.");
    is($runlog->signal, undef, "The signal is undefined.");
@@ -70,8 +70,8 @@ subtest "The process completed (success)" => sub {
    ok(!$runlog->is_running(), "The process is not running.");
    ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
    ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
    is($runlog->error_number, undef, "The error number is undefined");
    is($runlog->exit_code, 0, "The exit code is 0.");
    is($runlog->signal, undef, "The signal is undefined.");
@@ -86,8 +86,8 @@ subtest "The process completed (errored)" => sub {
    ok(!$runlog->is_running(), "The process is not running.");
    ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
    ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
    is($runlog->error_number, undef, "The error number is undefined");
    is($runlog->exit_code, 85, "The exit code is 85.");
    is($runlog->signal, undef, "The signal is undefined.");
@@ -102,8 +102,8 @@ subtest "The process completed (status 15, child error 0)" => sub {
    ok(!$runlog->is_running(), "The process is not running.");
    ok($runlog->did_fail_with_signal(), "The process was killed by a signal.");
    ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
    is($runlog->error_number, undef, "The error number is undefined");
    is($runlog->exit_code, undef, "The exit code is undefined.");
    is($runlog->signal, 15, "Signal 15 was sent.");
@@ -118,8 +118,8 @@ subtest "The process completed (signaled)" => sub {
    ok(!$runlog->is_running(), "The process is not running.");
    ok($runlog->did_fail_with_signal(), "The process was killed by a signal.");
    ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
    is($runlog->error_number, undef, "The error number is undefined");
    is($runlog->exit_code, undef, "The exit code is undefined.");
    is($runlog->signal, 9, "The signal is 9.");
@@ -134,8 +134,8 @@ subtest "The process failed to start" => sub {
    ok(!$runlog->is_running(), "The process is running.");
    ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
    ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error.");
    is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
    is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
    is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
    is($runlog->error_number, 2, "The error number is saved");
    is($runlog->exit_code, undef, "The exit code is undefined.");
    is($runlog->signal, undef, "The signal is undefined.");

@@ -25,11 +25,11 @@ subtest "requeue" => sub {

    $task->requeue();
    is($task->attempts, 2, "We should have stored a second retry");
    is($task->retry_at, within(time() + 4, 5), "Delayed two exponential backoff steps");
    is($task->retry_at, within(time() + 4, 2), "Delayed two exponential backoff steps");

    $task->requeue();
    is($task->attempts, 3, "We should have stored a third retry");
    is($task->retry_at, within(time() + 8, 5), "Delayed a third exponential backoff step");
    is($task->retry_at, within(time() + 8, 2), "Delayed a third exponential backoff step");
};

done_testing;

@@ -101,7 +101,7 @@ subtest "save_task" => sub {
    is($retry->pluginname, "FooPluginName", "Plugin name should match");
    is($retry->payload, "1", "Payload should match");
    is($retry->attempts, 1, "We've had one attempt");
    is($retry->retry_at, within(time() + 1, 5), "The retry at should be approximately one second away");
    is($retry->retry_at, within(time() + 1, 2), "The retry at should be approximately one second away");
};

done_testing;

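Restating the schedule these assertions encode: the first save puts `retry_at` about 1 second out, the second requeue about 4 seconds, and the third about 8. A plausible reconstruction of the delay rule follows; the exact base and offset are an assumption, not something shown in this diff:

```perl
# Hypothetical sketch of the backoff being asserted above:
# roughly 2 ** attempts seconds, with the first attempt
# retried after ~1 second.
sub retry_delay {
    my ($attempts) = @_;
    return $attempts <= 1 ? 1 : 2 ** $attempts;
}
```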
@@ -1,61 +0,0 @@
use feature 'unicode_strings';
use strict;
use warnings;
use Setup;

my %ctx = test_init(
    nix_config => qq|
    experimental-features = ca-derivations
    |,
);

require Hydra::Schema;
require Hydra::Model::DB;

use JSON::MaybeXS;

use HTTP::Request::Common;
use Test2::V0;
require Catalyst::Test;
Catalyst::Test->import('Hydra');

my $db = Hydra::Model::DB->new;
hydra_setup($db);

my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"});

my $jobset = createBaseJobset("content-addressed", "content-addressed.nix", $ctx{jobsdir});

ok(evalSucceeds($jobset), "Evaluating jobs/content-addressed.nix should exit with return code 0");
is(nrQueuedBuildsForJobset($jobset), 5, "Evaluating jobs/content-addressed.nix should result in 5 builds");

for my $build (queuedBuildsForJobset($jobset)) {
    ok(runBuild($build), "Build '".$build->job."' from jobs/content-addressed.nix should exit with code 0");
    my $newbuild = $db->resultset('Builds')->find($build->id);
    is($newbuild->finished, 1, "Build '".$build->job."' from jobs/content-addressed.nix should be finished.");
    my $expected = $build->job eq "fails" ? 1 : $build->job =~ /with_failed/ ? 6 : 0;
    is($newbuild->buildstatus, $expected, "Build '".$build->job."' from jobs/content-addressed.nix should have buildstatus $expected.");

    my $response = request("/build/".$build->id);
    ok($response->is_success, "The 'build' page for build '".$build->job."' should load properly");

    if ($newbuild->buildstatus == 0) {
        my $buildOutputs = $newbuild->buildoutputs;
        for my $output ($newbuild->buildoutputs) {
            # XXX: This hardcodes /nix/store/.
            # It's fine because in practice the nix store for the tests will be of
            # the form `/some/thing/nix/store/`, but it would be cleaner if there
            # was a way to query Nix for its store dir?
            like(
                $output->path, qr|/nix/store/|,
                "Output '".$output->name."' of build '".$build->job."' should be a valid store path"
            );
        }
    }

}

isnt(<$ctx{deststoredir}/realisations/*>, "", "The destination store should have the realisations of the built derivations registered");

done_testing;

@@ -1,28 +0,0 @@
use feature 'unicode_strings';
use strict;
use warnings;
use Setup;

my %ctx = test_init();

require Hydra::Schema;
require Hydra::Model::DB;

use JSON::MaybeXS;

use HTTP::Request::Common;
use Test2::V0;
require Catalyst::Test;
Catalyst::Test->import('Hydra');

my $db = Hydra::Model::DB->new;
hydra_setup($db);

my $project = $db->resultset('Projects')->create({name => "tests", displayname => "", owner => "root"});

my $jobset = createBaseJobset("content-addressed", "content-addressed.nix", $ctx{jobsdir});

ok(evalSucceeds($jobset), "Evaluating jobs/content-addressed.nix without the experimental feature should exit with return code 0");
is(nrQueuedBuildsForJobset($jobset), 0, "Evaluating jobs/content-addressed.nix without the experimental Nix feature should result in 0 builds");

done_testing;
@@ -4,8 +4,6 @@ with import ./config.nix;
  mkDerivation {
    name = "empty-dir";
    builder = ./empty-dir-builder.sh;
    meta.maintainers = [ "alice@invalid.org" ];
    meta.outPath = "${placeholder "out"}";
  };

  fails =

@@ -6,9 +6,4 @@ rec {
    system = builtins.currentSystem;
    PATH = path;
  } // args);
  mkContentAddressedDerivation = args: mkDerivation ({
    __contentAddressed = true;
    outputHashMode = "recursive";
    outputHashAlgo = "sha256";
  } // args);
}

@@ -1,35 +0,0 @@
let cfg = import ./config.nix; in
rec {
  empty_dir =
    cfg.mkContentAddressedDerivation {
      name = "empty-dir";
      builder = ./empty-dir-builder.sh;
    };

  fails =
    cfg.mkContentAddressedDerivation {
      name = "fails";
      builder = ./fail.sh;
    };

  succeed_with_failed =
    cfg.mkContentAddressedDerivation {
      name = "succeed-with-failed";
      builder = ./succeed-with-failed.sh;
    };

  caDependingOnCA =
    cfg.mkContentAddressedDerivation {
      name = "ca-depending-on-ca";
      builder = ./dir-with-file-builder.sh;
      FOO = empty_dir;
    };

  nonCaDependingOnCA =
    cfg.mkDerivation {
      name = "non-ca-depending-on-ca";
      builder = ./dir-with-file-builder.sh;
      FOO = empty_dir;
    };
}

@@ -1,4 +0,0 @@
#! /bin/sh

mkdir $out
echo foo > $out/a-file
@@ -1,3 +1,6 @@
#! /bin/sh

# Workaround for https://github.com/NixOS/nix/pull/6051
echo "some output"

mkdir $out

@@ -39,11 +39,7 @@ use Hydra::Helper::Exec;
sub new {
    my ($class, %opts) = @_;

    my $deststoredir;

    # Cleanup will be managed by yath. By default it will be cleaned
    # up, but can be kept to aid in debugging test failures.
    my $dir = File::Temp->newdir(CLEANUP => 0);
    my $dir = File::Temp->newdir();

    $ENV{'HYDRA_DATA'} = "$dir/hydra-data";
    mkdir $ENV{'HYDRA_DATA'};
@@ -57,7 +53,6 @@ sub new {
    my $hydra_config = $opts{'hydra_config'} || "";
    $hydra_config = "queue_runner_metrics_address = 127.0.0.1:0\n" . $hydra_config;
    if ($opts{'use_external_destination_store'} // 1) {
        $deststoredir = "$dir/nix/dest-store";
        $hydra_config = "store_uri = file://$dir/nix/dest-store\n" . $hydra_config;
    }

@@ -84,8 +79,7 @@ sub new {
        nix_state_dir => $nix_state_dir,
        nix_log_dir => $nix_log_dir,
        testdir => abs_path(dirname(__FILE__) . "/.."),
        jobsdir => abs_path(dirname(__FILE__) . "/../jobs"),
        deststoredir => $deststoredir,
        jobsdir => abs_path(dirname(__FILE__) . "/../jobs")
    }, $class;

    if ($opts{'before_init'}) {

@@ -8,7 +8,7 @@ my $binarycachedir = File::Temp->newdir();

my $ctx = test_context(
    nix_config => qq|
    experimental-features = nix-command ca-derivations
    experimental-features = nix-command
    substituters = file://${binarycachedir}?trusted=1
    |,
    hydra_config => q|