Merge remote-tracking branch 'upstream/master' into use-store-api

John Ericson
2023-12-04 10:34:00 -05:00
39 changed files with 534 additions and 406 deletions


@@ -4,7 +4,7 @@ on:
push:
jobs:
tests:
runs-on: ubuntu-18.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:

.gitignore

@@ -20,6 +20,7 @@ Makefile.in
/src/sql/hydra-postgresql.sql
/src/sql/hydra-sqlite.sql
/src/sql/tmp.sqlite
/src/hydra-build-step/hydra-build-step
/src/hydra-eval-jobs/hydra-eval-jobs
/src/root/static/bootstrap
/src/root/static/js/flot


@@ -80,7 +80,7 @@ $ nix-build
You can use the provided shell.nix to get a working development environment:
```
$ nix-shell
$ ./bootstrap
$ autoreconfPhase
$ configurePhase # NOTE: not ./configure
$ make
```


@@ -1,2 +0,0 @@
#! /bin/sh -e
exec autoreconf -vfi


@@ -10,8 +10,6 @@ AC_PROG_LN_S
AC_PROG_LIBTOOL
AC_PROG_CXX
CXXFLAGS+=" -std=c++17"
AC_PATH_PROG([XSLTPROC], [xsltproc])
AC_ARG_WITH([docbook-xsl],


@@ -131,8 +131,8 @@ use LDAP to manage roles and users.
This is configured by defining the `<ldap>` block in the configuration file.
In this block it's possible to configure the authentication plugin in the
`<config>` block. All options are directly passed to `Catalyst::Authentication::Store::LDAP`.
The documentation for the available settings can be found [here]
(https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
The documentation for the available settings can be found
[here](https://metacpan.org/pod/Catalyst::Authentication::Store::LDAP#CONFIGURATION-OPTIONS).
Note that the bind password (if needed) should be supplied as an included file to
prevent it from leaking to the Nix store.
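For instance, a minimal sketch of such an included file, assuming Config::General's `<<include>>` directive (the format Hydra's configuration file uses) and an illustrative path; the `bindpw` option name comes from `Catalyst::Authentication::Store::LDAP`:
```
# hydra.conf -- keep the LDAP bind password out of the world-readable Nix store
<ldap>
  <config>
    <store>
      # /var/lib/hydra/ldap-password.conf contains a single line:
      #   bindpw = <secret>
      <<include /var/lib/hydra/ldap-password.conf>>
    </store>
  </config>
</ldap>
```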
@@ -179,6 +179,7 @@ Example configuration:
<role_search_options>
deref = always
</role_search_options>
</store>
</config>
<role_mapping>
# Make all users in the hydra_admin group Hydra admins

flake.lock

@@ -1,5 +1,21 @@
{
"nodes": {
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"lowdown-src": {
"flake": false,
"locked": {
@@ -18,37 +34,40 @@
},
"nix": {
"inputs": {
"flake-compat": "flake-compat",
"lowdown-src": "lowdown-src",
"nixpkgs": "nixpkgs",
"nixpkgs": [
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression"
},
"locked": {
"lastModified": 1661606874,
"narHash": "sha256-9+rpYzI+SmxJn+EbYxjGv68Ucp22bdFUSy/4LkHkkDQ=",
"lastModified": 1701122567,
"narHash": "sha256-iA8DqS+W2fWTfR+nNJSvMHqQ+4NpYMRT3b+2zS6JTvE=",
"owner": "NixOS",
"repo": "nix",
"rev": "11e45768b34fdafdcf019ddbd337afa16127ff0f",
"rev": "50f8f1c8bc019a4c0fd098b9ac674b94cfc6af0d",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "2.11.0",
"ref": "2.19.2",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1657693803,
"narHash": "sha256-G++2CJ9u0E7NNTAi9n5G8TdDmGJXcIjkJ3NF8cetQB8=",
"lastModified": 1687379288,
"narHash": "sha256-cSuwfiqYfeVyqzCRkU9AvLTysmEuSal8nh6CYr+xWog=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "365e1b3a859281cf11b94f87231adeabbdd878a2",
"rev": "ef0bc3976340dab9a4e087a0bcff661a8b2e87f3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-22.05-small",
"ref": "nixos-23.05",
"repo": "nixpkgs",
"type": "github"
}
@@ -72,10 +91,7 @@
"root": {
"inputs": {
"nix": "nix",
"nixpkgs": [
"nix",
"nixpkgs"
]
"nixpkgs": "nixpkgs"
}
}
},

flake.nix

@@ -1,18 +1,21 @@
{
description = "A Nix-based continuous build system";
inputs.nixpkgs.follows = "nix/nixpkgs";
inputs.nix.url = "github:NixOS/nix/2.11.0";
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.05";
inputs.nix.url = "github:NixOS/nix/2.19.2";
inputs.nix.inputs.nixpkgs.follows = "nixpkgs";
outputs = { self, nixpkgs, nix }:
let
version = "${builtins.readFile ./version.txt}.${builtins.substring 0 8 (self.lastModifiedDate or "19700101")}.${self.shortRev or "DIRTY"}";
pkgs = import nixpkgs {
system = "x86_64-linux";
systems = [ "x86_64-linux" "aarch64-linux" ];
forEachSystem = nixpkgs.lib.genAttrs systems;
pkgsBySystem = forEachSystem (system: import nixpkgs {
inherit system;
overlays = [ self.overlays.default nix.overlays.default ];
};
});
# NixOS configuration used for VM tests.
hydraServer =
@@ -58,10 +61,11 @@
};
hydra = with final; let
perlDeps = buildEnv {
hydra = let
inherit (final) lib stdenv;
perlDeps = final.buildEnv {
name = "hydra-perl-deps";
paths = with perlPackages; lib.closePropagation
paths = with final.perlPackages; lib.closePropagation
[
AuthenSASL
CatalystActionREST
@@ -95,7 +99,7 @@
FileSlurper
FileWhich
final.nix.perl-bindings
git
final.git
IOCompress
IPCRun
IPCRun3
@@ -138,15 +142,20 @@
src = self;
buildInputs =
[
nativeBuildInputs =
with final.buildPackages; [
makeWrapper
autoconf
autoreconfHook
automake
libtool
unzip
nukeReferences
pkg-config
mdbook
];
buildInputs =
with final; [
unzip
libpqxx
top-git
mercurial
@@ -159,7 +168,6 @@
final.nix
perlDeps
perl
mdbook
pixz
boost
postgresql_13
@@ -169,7 +177,7 @@
prometheus-cpp
];
checkInputs = [
checkInputs = with final; [
cacert
foreman
glibcLocales
@@ -178,7 +186,7 @@
python3
];
hydraPath = lib.makeBinPath (
hydraPath = with final; lib.makeBinPath (
[
subversion
openssh
@@ -200,7 +208,7 @@
] ++ lib.optionals stdenv.isLinux [ rpm dpkg cdrkit ]
);
OPENLDAP_ROOT = openldap;
OPENLDAP_ROOT = final.openldap;
shellHook = ''
pushd $(git rev-parse --show-toplevel) >/dev/null
@@ -215,8 +223,6 @@
popd >/dev/null
'';
preConfigure = "autoreconf -vfi";
NIX_LDFLAGS = [ "-lpthread" ];
enableParallelBuilding = true;
@@ -254,9 +260,10 @@
hydraJobs = {
build.x86_64-linux = packages.x86_64-linux.hydra;
build = forEachSystem (system: packages.${system}.hydra);
manual =
manual = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
pkgs.runCommand "hydra-manual-${version}" { }
''
mkdir -p $out/share
@@ -264,11 +271,12 @@
mkdir $out/nix-support
echo "doc manual $out/share/doc/hydra" >> $out/nix-support/hydra-build-products
'';
'');
tests.install.x86_64-linux =
with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
tests.install = forEachSystem (system:
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
simpleTest {
name = "hydra-install";
nodes.machine = hydraServer;
testScript =
''
@@ -276,14 +284,16 @@
machine.wait_for_job("hydra-server")
machine.wait_for_job("hydra-evaluator")
machine.wait_for_job("hydra-queue-runner")
machine.wait_for_open_port("3000")
machine.wait_for_open_port(3000)
machine.succeed("curl --fail http://localhost:3000/")
'';
};
});
tests.notifications.x86_64-linux =
with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
tests.notifications = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
simpleTest {
name = "hydra-notifications";
nodes.machine = { pkgs, ... }: {
imports = [ hydraServer ];
services.hydra-dev.extraConfig = ''
@@ -311,7 +321,7 @@
# Wait until InfluxDB can receive web requests
machine.wait_for_job("influxdb")
machine.wait_for_open_port("8086")
machine.wait_for_open_port(8086)
# Create an InfluxDB database where hydra will write to
machine.succeed(
@@ -321,7 +331,7 @@
# Wait until hydra-server can receive HTTP requests
machine.wait_for_job("hydra-server")
machine.wait_for_open_port("3000")
machine.wait_for_open_port(3000)
# Setup the project and jobset
machine.succeed(
@@ -336,11 +346,13 @@
+ "--data-urlencode 'q=SELECT * FROM hydra_build_status' | grep success"
)
'';
};
});
tests.gitea.x86_64-linux =
with import (nixpkgs + "/nixos/lib/testing-python.nix") { system = "x86_64-linux"; };
tests.gitea = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
with import (nixpkgs + "/nixos/lib/testing-python.nix") { inherit system; };
makeTest {
name = "hydra-gitea";
nodes.machine = { pkgs, ... }: {
imports = [ hydraServer ];
services.hydra-dev.extraConfig = ''
@@ -352,7 +364,7 @@
distributedBuilds = true;
buildMachines = [{
hostName = "localhost";
systems = [ "x86_64-linux" ];
systems = [ system ];
}];
binaryCaches = [ ];
};
@@ -467,7 +479,7 @@
smallDrv = pkgs.writeText "jobset.nix" ''
{ trivial = builtins.derivation {
name = "trivial";
system = "x86_64-linux";
system = "${system}";
builder = "/bin/sh";
allowSubstitutes = false;
preferLocalBuild = true;
@@ -531,31 +543,37 @@
machine.shutdown()
'';
};
});
tests.validate-openapi = pkgs.runCommand "validate-openapi"
tests.validate-openapi = forEachSystem (system:
let pkgs = pkgsBySystem.${system}; in
pkgs.runCommand "validate-openapi"
{ buildInputs = [ pkgs.openapi-generator-cli ]; }
''
openapi-generator-cli validate -i ${./hydra-api.yaml}
touch $out
'';
'');
container = nixosConfigurations.container.config.system.build.toplevel;
};
checks.x86_64-linux.build = hydraJobs.build.x86_64-linux;
checks.x86_64-linux.install = hydraJobs.tests.install.x86_64-linux;
checks.x86_64-linux.validate-openapi = hydraJobs.tests.validate-openapi;
checks = forEachSystem (system: {
build = hydraJobs.build.${system};
install = hydraJobs.tests.install.${system};
validate-openapi = hydraJobs.tests.validate-openapi.${system};
});
packages.x86_64-linux.hydra = pkgs.hydra;
packages.x86_64-linux.default = pkgs.hydra;
packages = forEachSystem (system: {
hydra = pkgsBySystem.${system}.hydra;
default = pkgsBySystem.${system}.hydra;
});
nixosModules.hydra = {
imports = [ ./hydra-module.nix ];
nixpkgs.overlays = [ self.overlays.default nix.overlays.default ];
};
nixosModules.hydraTest = {
nixosModules.hydraTest = { pkgs, ... }: {
imports = [ self.nixosModules.hydra ];
services.hydra-dev.enable = true;


@@ -533,13 +533,13 @@ paths:
schema:
$ref: '#/components/schemas/Error'
/eval/{build-id}:
/eval/{eval-id}:
get:
summary: Retrieves evaluations identified by build id
summary: Retrieves evaluations identified by eval id
parameters:
- name: build-id
- name: eval-id
in: path
description: build identifier
description: eval identifier
required: true
schema:
type: integer
@@ -551,6 +551,24 @@ paths:
schema:
$ref: '#/components/schemas/JobsetEval'
/eval/{eval-id}/builds:
get:
summary: Retrieves all builds belonging to an evaluation identified by eval id
parameters:
- name: eval-id
in: path
description: eval identifier
required: true
schema:
type: integer
responses:
'200':
description: builds
content:
application/json:
schema:
$ref: '#/components/schemas/JobsetEvalBuilds'
components:
schemas:
@@ -796,6 +814,13 @@ components:
additionalProperties:
$ref: '#/components/schemas/JobsetEvalInput'
JobsetEvalBuilds:
type: array
items:
type: object
additionalProperties:
$ref: '#/components/schemas/Build'
JobsetOverview:
type: array
items:
@@ -870,7 +895,7 @@ components:
description: Size of the produced file
type: integer
defaultpath:
description: This is a Git/Mercurial commit hash or a Subversion revision number
description: if path is a directory, the default file relative to path to be served
type: string
'type':
description: Types of build product (user defined)


@@ -340,7 +340,7 @@ in
systemd.services.hydra-queue-runner =
{ wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ];
after = [ "hydra-init.service" "network.target" ];
after = [ "hydra-init.service" "network.target" "network-online.target" ];
path = [ cfg.package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
restartTriggers = [ hydraConf ];
environment = env // {


@@ -1,5 +1,5 @@
bin_PROGRAMS = hydra-build-step
hydra_build_step_SOURCES = hydra-build-step.cc
hydra_build_step_LDADD = $(NIX_LIBS)
hydra_build_step_LDADD = $(NIX_LIBS) -lnixcmd
hydra_build_step_CXXFLAGS = $(NIX_CFLAGS)


@@ -14,6 +14,7 @@
The build log is written to the path indicated by --log-file.
*/
#include "util.hh"
#include "shared.hh"
#include "common-eval-args.hh"
#include "store-api.hh"
@@ -37,7 +38,7 @@ void mainWrapped(std::list<std::string> args)
{
verbosity = lvlError;
struct MyArgs : MixEvalArgs, MixCommonArgs
struct MyArgs : MixEvalArgs, MixCommonArgs, RootArgs
{
Path drvPath;
std::optional<std::string> buildStoreUrl;
@@ -89,8 +90,8 @@ void mainWrapped(std::list<std::string> args)
throw SysError("creating log file '%s'", logPath);
}
void log(Verbosity lvl, const FormatOrString & fs) override
{ prev.log(lvl, fs); }
void log(Verbosity lvl, std::string_view s) override
{ prev.log(lvl, s); }
void logEI(const ErrorInfo & ei) override
{ prev.logEI(ei); }
@@ -124,9 +125,9 @@ void mainWrapped(std::list<std::string> args)
for (auto & p : drv.inputSrcs)
inputs.insert(p);
for (auto & input : drv.inputDrvs) {
auto drv2 = evalStore->readDerivation(input.first);
for (auto & name : input.second) {
for (auto & [drvPath, node] : drv.inputDrvs.map) {
auto drv2 = evalStore->readDerivation(drvPath);
for (auto & name : node.value) {
if (auto i = get(drv2.outputs, name)) {
auto outPath = i->path(*evalStore, drv2.name, name);
inputs.insert(*outPath);
@@ -193,10 +194,14 @@ void mainWrapped(std::list<std::string> args)
}
}
FdSink stdout(STDOUT_FILENO);
stdout << overhead;
stdout << totalNarSize;
worker_proto::write(*evalStore, stdout, buildResult);
FdSink to { STDOUT_FILENO };
WorkerProto::WriteConn wconn {
.to = to,
// Hardcode latest version because we are deploying hydra
// itself atomically
.version = PROTOCOL_VERSION,
};
WorkerProto::write(*evalStore, wconn, buildResult);
}
int main(int argc, char * * argv)


@@ -7,6 +7,9 @@
#include "store-api.hh"
#include "eval.hh"
#include "eval-inline.hh"
#include "eval-settings.hh"
#include "signals.hh"
#include "terminal.hh"
#include "util.hh"
#include "get-drvs.hh"
#include "globals.hh"
@@ -25,7 +28,8 @@
#include <nlohmann/json.hpp>
void check_pid_status_nonblocking(pid_t check_pid) {
void check_pid_status_nonblocking(pid_t check_pid)
{
// Only check 'initialized' and known PID's
if (check_pid <= 0) { return; }
@@ -52,7 +56,7 @@ using namespace nix;
static Path gcRootsDir;
static size_t maxMemorySize;
struct MyArgs : MixEvalArgs, MixCommonArgs
struct MyArgs : MixEvalArgs, MixCommonArgs, RootArgs
{
Path releaseExpr;
bool flake = false;
@@ -93,14 +97,14 @@ static std::string queryMetaStrings(EvalState & state, DrvInfo & drv, const std:
rec = [&](Value & v) {
state.forceValue(v, noPos);
if (v.type() == nString)
res.push_back(v.string.s);
res.emplace_back(v.string_view());
else if (v.isList())
for (unsigned int n = 0; n < v.listSize(); ++n)
rec(*v.listElems()[n]);
else if (v.type() == nAttrs) {
auto a = v.attrs->find(state.symbols.create(subAttribute));
if (a != v.attrs->end())
res.push_back(std::string(state.forceString(*a->value)));
res.push_back(std::string(state.forceString(*a->value, a->pos, "while evaluating meta attributes")));
}
};
@@ -129,7 +133,7 @@ static void worker(
LockFlags {
.updateLockFile = false,
.useRegistries = false,
.allowMutable = false,
.allowUnlocked = false,
});
callFlake(state, lockedFlake, *vFlake);
@@ -197,26 +201,30 @@ static void worker(
/* If this is an aggregate, then get its constituents. */
auto a = v->attrs->get(state.symbols.create("_hydraAggregate"));
if (a && state.forceBool(*a->value, a->pos)) {
if (a && state.forceBool(*a->value, a->pos, "while evaluating the `_hydraAggregate` attribute")) {
auto a = v->attrs->get(state.symbols.create("constituents"));
if (!a)
throw EvalError("derivation must have a constituents attribute");
NixStringContext context;
state.coerceToString(a->pos, *a->value, context, "while evaluating the `constituents` attribute", true, false);
for (auto & c : context)
std::visit(overloaded {
[&](const NixStringContextElem::Built & b) {
job["constituents"].push_back(b.drvPath->to_string(*state.store));
},
[&](const NixStringContextElem::Opaque & o) {
},
[&](const NixStringContextElem::DrvDeep & d) {
},
}, c.raw);
PathSet context;
state.coerceToString(a->pos, *a->value, context, true, false);
for (auto & i : context)
if (i.at(0) == '!') {
size_t index = i.find("!", 1);
job["constituents"].push_back(std::string(i, index + 1));
}
state.forceList(*a->value, a->pos);
state.forceList(*a->value, a->pos, "while evaluating the `constituents` attribute");
for (unsigned int n = 0; n < a->value->listSize(); ++n) {
auto v = a->value->listElems()[n];
state.forceValue(*v, noPos);
if (v->type() == nString)
job["namedConstituents"].push_back(state.forceStringNoCtx(*v));
job["namedConstituents"].push_back(v->string_view());
}
}
@@ -245,7 +253,7 @@ static void worker(
StringSet ss;
for (auto & i : v->attrs->lexicographicOrder(state.symbols)) {
std::string name(state.symbols[i->name]);
if (name.find('.') != std::string::npos || name.find(' ') != std::string::npos) {
if (name.find(' ') != std::string::npos) {
printError("skipping job with illegal name '%s'", name);
continue;
}
@@ -416,7 +424,11 @@ int main(int argc, char * * argv)
if (response.find("attrs") != response.end()) {
for (auto & i : response["attrs"]) {
auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) i;
std::string path = i;
if (path.find(".") != std::string::npos){
path = "\"" + path + "\"";
}
auto s = (attrPath.empty() ? "" : attrPath + ".") + (std::string) path;
newAttrs.insert(s);
}
}
@@ -507,7 +519,7 @@ int main(int argc, char * * argv)
auto drvPath2 = store->parseStorePath((std::string) (*job2)["drvPath"]);
auto drv2 = store->readDerivation(drvPath2);
job["constituents"].push_back(store->printStorePath(drvPath2));
drv.inputDrvs[drvPath2] = {drv2.outputs.begin()->first};
drv.inputDrvs.map[drvPath2].value = {drv2.outputs.begin()->first};
}
if (brokenJobs.empty()) {


@@ -2,6 +2,7 @@
#include "hydra-config.hh"
#include "pool.hh"
#include "shared.hh"
#include "signals.hh"
#include <algorithm>
#include <thread>
@@ -366,6 +367,9 @@ struct Evaluator
printInfo("received jobset event");
}
} catch (pqxx::broken_connection & e) {
printError("Database connection broken: %s", e.what());
std::_Exit(1);
} catch (std::exception & e) {
printError("exception in database monitor thread: %s", e.what());
sleep(30);
@@ -473,6 +477,9 @@ struct Evaluator
while (true) {
try {
loop();
} catch (pqxx::broken_connection & e) {
printError("Database connection broken: %s", e.what());
std::_Exit(1);
} catch (std::exception & e) {
printError("exception in main loop: %s", e.what());
sleep(30);


@@ -1,5 +1,8 @@
#include "build-result.hh"
#include "serve-protocol.hh"
#include "state.hh"
#include "current-process.hh"
#include "processes.hh"
#include "util.hh"
#include "finally.hh"
#include "url.hh"
@@ -56,7 +59,7 @@ void State::buildRemote(ref<Store> destStore,
// FIXME: set pid for cancellation
auto [status, stdout] = [&]() {
auto [status, childStdout] = [&]() {
MaintainCount<counter> mc(nrStepsBuilding);
return runProgram({
.program = "hydra-build-step",
@@ -96,11 +99,17 @@ void State::buildRemote(ref<Store> destStore,
info->consecutiveFailures = 0;
}
StringSource from { childStdout };
/* Read the BuildResult from the child. */
StringSource source(stdout);
result.overhead += readNum<uint64_t>(source);
auto totalNarSize = readNum<uint64_t>(source);
auto buildResult = worker_proto::read(*localStore, source, Phantom<BuildResult> {});
WorkerProto::ReadConn rconn {
.from = from,
// Hardcode latest version because we are deploying hydra
// itself atomically
.version = PROTOCOL_VERSION,
};
result.overhead += readNum<uint64_t>(rconn.from);
auto totalNarSize = readNum<uint64_t>(rconn.from);
auto buildResult = WorkerProto::Serialise<BuildResult>::read(*localStore, rconn);
// FIXME: make RemoteResult inherit BuildResult.
result.errorMsg = buildResult.errorMsg;


@@ -1,7 +1,7 @@
#include "hydra-build-result.hh"
#include "store-api.hh"
#include "util.hh"
#include "fs-accessor.hh"
#include "source-accessor.hh"
#include <regex>
@@ -63,7 +63,7 @@ BuildOutput getBuildOutput(
auto productsFile = narMembers.find(outputS + "/nix-support/hydra-build-products");
if (productsFile == narMembers.end() ||
productsFile->second.type != FSAccessor::Type::tRegular)
productsFile->second.type != SourceAccessor::Type::tRegular)
continue;
assert(productsFile->second.contents);
@@ -94,7 +94,7 @@ BuildOutput getBuildOutput(
product.name = product.path == store->printStorePath(output) ? "" : baseNameOf(product.path);
if (file->second.type == FSAccessor::Type::tRegular) {
if (file->second.type == SourceAccessor::Type::tRegular) {
product.isRegular = true;
product.fileSize = file->second.fileSize.value();
product.sha256hash = file->second.sha256.value();
@@ -117,7 +117,7 @@ BuildOutput getBuildOutput(
auto file = narMembers.find(product.path);
assert(file != narMembers.end());
if (file->second.type == FSAccessor::Type::tDirectory)
if (file->second.type == SourceAccessor::Type::tDirectory)
res.products.push_back(product);
}
}
@@ -126,7 +126,7 @@ BuildOutput getBuildOutput(
for (auto & output : outputs) {
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-release-name");
if (file == narMembers.end() ||
file->second.type != FSAccessor::Type::tRegular)
file->second.type != SourceAccessor::Type::tRegular)
continue;
res.releaseName = trim(file->second.contents.value());
// FIXME: validate release name
@@ -136,7 +136,7 @@ BuildOutput getBuildOutput(
for (auto & output : outputs) {
auto file = narMembers.find(store->printStorePath(output) + "/nix-support/hydra-metrics");
if (file == narMembers.end() ||
file->second.type != FSAccessor::Type::tRegular)
file->second.type != SourceAccessor::Type::tRegular)
continue;
for (auto & line : tokenizeString<Strings>(file->second.contents.value(), "\n")) {
auto fields = tokenizeString<std::vector<std::string>>(line);


@@ -3,6 +3,7 @@
#include "state.hh"
#include "hydra-build-result.hh"
#include "finally.hh"
#include "terminal.hh"
#include "binary-cache-store.hh"
using namespace nix;
@@ -323,7 +324,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
pqxx::work txn(*conn);
for (auto & b : direct) {
printMsg(lvlInfo, format("marking build %1% as succeeded") % b->id);
printInfo("marking build %1% as succeeded", b->id);
markSucceededBuild(txn, b, res, buildId != b->id || result.isCached,
result.startTime, result.stopTime);
}
@@ -451,7 +452,7 @@ void State::failStep(
/* Mark all builds that depend on this derivation as failed. */
for (auto & build : indirect) {
if (build->finishedInDB) continue;
printMsg(lvlError, format("marking build %1% as failed") % build->id);
printError("marking build %1% as failed", build->id);
txn.exec_params0
("update Builds set finished = 1, buildStatus = $2, startTime = $3, stopTime = $4, isCachedBuild = $5, notificationPendingSince = $4 where id = $1 and finished = 0",
build->id,


@@ -52,7 +52,7 @@ void State::dispatcher()
{
auto dispatcherWakeup_(dispatcherWakeup.lock());
if (!*dispatcherWakeup_) {
printMsg(lvlDebug, format("dispatcher sleeping for %1%s") %
debug("dispatcher sleeping for %1%s",
std::chrono::duration_cast<std::chrono::seconds>(sleepUntil - std::chrono::system_clock::now()).count());
dispatcherWakeup_.wait_until(dispatcherWakeupCV, sleepUntil);
}
@@ -60,7 +60,7 @@ void State::dispatcher()
}
} catch (std::exception & e) {
printMsg(lvlError, format("dispatcher: %1%") % e.what());
printError("dispatcher: %s", e.what());
sleep(1);
}
@@ -80,17 +80,118 @@ system_time State::doDispatch()
jobset.second->pruneSteps();
auto s2 = jobset.second->shareUsed();
if (s1 != s2)
printMsg(lvlDebug, format("pruned scheduling window of %1%:%2% from %3% to %4%")
% jobset.first.first % jobset.first.second % s1 % s2);
debug("pruned scheduling window of %1%:%2% from %3% to %4%",
jobset.first.first, jobset.first.second, s1, s2);
}
}
system_time now = std::chrono::system_clock::now();
/* Start steps until we're out of steps or slots. */
auto sleepUntil = system_time::max();
bool keepGoing;
/* Sort the runnable steps by priority. Priority is established
as follows (in order of precedence):
- The global priority of the builds that depend on the
step. This allows admins to bump a build to the front of
the queue.
- The lowest used scheduling share of the jobsets depending
on the step.
- The local priority of the build, as set via the build's
meta.schedulingPriority field. Note that this is not
quite correct: the local priority should only be used to
establish priority between builds in the same jobset, but
here it's used between steps in different jobsets if they
happen to have the same lowest used scheduling share. But
that's not very likely.
- The lowest ID of the builds depending on the step;
i.e. older builds take priority over new ones.
FIXME: O(n lg n); obviously, it would be better to keep a
runnable queue sorted by priority. */
struct StepInfo
{
Step::ptr step;
bool alreadyScheduled = false;
/* The lowest share used of any jobset depending on this
step. */
double lowestShareUsed = 1e9;
/* Info copied from step->state to ensure that the
comparator is a partial ordering (see MachineInfo). */
int highestGlobalPriority;
int highestLocalPriority;
BuildID lowestBuildID;
StepInfo(Step::ptr step, Step::State & step_) : step(step)
{
for (auto & jobset : step_.jobsets)
lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
highestGlobalPriority = step_.highestGlobalPriority;
highestLocalPriority = step_.highestLocalPriority;
lowestBuildID = step_.lowestBuildID;
}
};
std::vector<StepInfo> runnableSorted;
struct RunnablePerType
{
unsigned int count{0};
std::chrono::seconds waitTime{0};
};
std::unordered_map<std::string, RunnablePerType> runnablePerType;
{
auto runnable_(runnable.lock());
runnableSorted.reserve(runnable_->size());
for (auto i = runnable_->begin(); i != runnable_->end(); ) {
auto step = i->lock();
/* Remove dead steps. */
if (!step) {
i = runnable_->erase(i);
continue;
}
++i;
auto & r = runnablePerType[step->systemType];
r.count++;
/* Skip previously failed steps that aren't ready
to be retried. */
auto step_(step->state.lock());
r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
if (step_->tries > 0 && step_->after > now) {
if (step_->after < sleepUntil)
sleepUntil = step_->after;
continue;
}
runnableSorted.emplace_back(step, *step_);
}
}
sort(runnableSorted.begin(), runnableSorted.end(),
[](const StepInfo & a, const StepInfo & b)
{
return
a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
a.lowestBuildID < b.lowestBuildID;
});
do {
system_time now = std::chrono::system_clock::now();
now = std::chrono::system_clock::now();
/* Copy the currentJobs field of each machine. This is
necessary to ensure that the sort comparator below is
@@ -138,104 +239,6 @@ system_time State::doDispatch()
a.currentJobs > b.currentJobs;
});
/* Sort the runnable steps by priority. Priority is established
as follows (in order of precedence):
- The global priority of the builds that depend on the
step. This allows admins to bump a build to the front of
the queue.
- The lowest used scheduling share of the jobsets depending
on the step.
- The local priority of the build, as set via the build's
meta.schedulingPriority field. Note that this is not
quite correct: the local priority should only be used to
establish priority between builds in the same jobset, but
here it's used between steps in different jobsets if they
happen to have the same lowest used scheduling share. But
that's not very likely.
- The lowest ID of the builds depending on the step;
i.e. older builds take priority over new ones.
FIXME: O(n lg n); obviously, it would be better to keep a
runnable queue sorted by priority. */
struct StepInfo
{
Step::ptr step;
/* The lowest share used of any jobset depending on this
step. */
double lowestShareUsed = 1e9;
/* Info copied from step->state to ensure that the
comparator is a partial ordering (see MachineInfo). */
int highestGlobalPriority;
int highestLocalPriority;
BuildID lowestBuildID;
StepInfo(Step::ptr step, Step::State & step_) : step(step)
{
for (auto & jobset : step_.jobsets)
lowestShareUsed = std::min(lowestShareUsed, jobset->shareUsed());
highestGlobalPriority = step_.highestGlobalPriority;
highestLocalPriority = step_.highestLocalPriority;
lowestBuildID = step_.lowestBuildID;
}
};
std::vector<StepInfo> runnableSorted;
struct RunnablePerType
{
unsigned int count{0};
std::chrono::seconds waitTime{0};
};
std::unordered_map<std::string, RunnablePerType> runnablePerType;
{
auto runnable_(runnable.lock());
runnableSorted.reserve(runnable_->size());
for (auto i = runnable_->begin(); i != runnable_->end(); ) {
auto step = i->lock();
/* Remove dead steps. */
if (!step) {
i = runnable_->erase(i);
continue;
}
++i;
auto & r = runnablePerType[step->systemType];
r.count++;
/* Skip previously failed steps that aren't ready
to be retried. */
auto step_(step->state.lock());
r.waitTime += std::chrono::duration_cast<std::chrono::seconds>(now - step_->runnableSince);
if (step_->tries > 0 && step_->after > now) {
if (step_->after < sleepUntil)
sleepUntil = step_->after;
continue;
}
runnableSorted.emplace_back(step, *step_);
}
}
sort(runnableSorted.begin(), runnableSorted.end(),
[](const StepInfo & a, const StepInfo & b)
{
return
a.highestGlobalPriority != b.highestGlobalPriority ? a.highestGlobalPriority > b.highestGlobalPriority :
a.lowestShareUsed != b.lowestShareUsed ? a.lowestShareUsed < b.lowestShareUsed :
a.highestLocalPriority != b.highestLocalPriority ? a.highestLocalPriority > b.highestLocalPriority :
a.lowestBuildID < b.lowestBuildID;
});
/* Find a machine with a free slot and find a step to run
on it. Once we find such a pair, we restart the outer
loop because the machine sorting will have changed. */
@@ -245,6 +248,8 @@ system_time State::doDispatch()
if (mi.machine->state->currentJobs >= mi.machine->maxJobs) continue;
for (auto & stepInfo : runnableSorted) {
if (stepInfo.alreadyScheduled) continue;
auto & step(stepInfo.step);
/* Can this machine do this step? */
@@ -271,6 +276,8 @@ system_time State::doDispatch()
r.count--;
}
stepInfo.alreadyScheduled = true;
/* Make a slot reservation and start a thread to
do the build. */
auto builderThread = std::thread(&State::builder, this,


@@ -8,6 +8,9 @@
#include <prometheus/exposer.h>
#include <nlohmann/json.hpp>
#include "signals.hh"
#include "state.hh"
#include "hydra-build-result.hh"
#include "store-api.hh"
@@ -15,20 +18,11 @@
#include "globals.hh"
#include "hydra-config.hh"
#include "json.hh"
#include "s3-binary-cache-store.hh"
#include "shared.hh"
using namespace nix;
namespace nix {
template<> void toJSON<std::atomic<long>>(std::ostream & str, const std::atomic<long> & n) { str << n; }
template<> void toJSON<std::atomic<uint64_t>>(std::ostream & str, const std::atomic<uint64_t> & n) { str << n; }
template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
}
using nlohmann::json;
std::string getEnvOrDie(const std::string & key)
@@ -168,9 +162,9 @@ void State::parseMachines(const std::string & contents)
same name. */
auto i = oldMachines.find(machine->sshName);
if (i == oldMachines.end())
printMsg(lvlChatty, format("adding new machine %1%") % machine->sshName);
printMsg(lvlChatty, "adding new machine %1%", machine->sshName);
else
printMsg(lvlChatty, format("updating machine %1%") % machine->sshName);
printMsg(lvlChatty, "updating machine %1%", machine->sshName);
machine->state = i == oldMachines.end()
? std::make_shared<Machine::State>()
: i->second->state;
@@ -180,7 +174,7 @@ void State::parseMachines(const std::string & contents)
for (auto & m : oldMachines)
if (newMachines.find(m.first) == newMachines.end()) {
if (m.second->enabled)
printMsg(lvlInfo, format("removing machine %1%") % m.first);
printInfo("removing machine %1%", m.first);
/* Add a disabled Machine object to make sure stats are
maintained. */
auto machine = std::make_shared<Machine>(*(m.second));
@@ -474,7 +468,7 @@ void State::markSucceededBuild(pqxx::work & txn, Build::ptr build,
product.type,
product.subtype,
product.fileSize ? std::make_optional(*product.fileSize) : std::nullopt,
product.sha256hash ? std::make_optional(product.sha256hash->to_string(Base16, false)) : std::nullopt,
product.sha256hash ? std::make_optional(product.sha256hash->to_string(HashFormat::Base16, false)) : std::nullopt,
product.path,
product.name,
product.defaultPath);
@@ -542,180 +536,167 @@ std::shared_ptr<PathLocks> State::acquireGlobalLock()
void State::dumpStatus(Connection & conn)
{
std::ostringstream out;
time_t now = time(0);
json statusJson = {
{"status", "up"},
{"time", time(0)},
{"uptime", now - startedAt},
{"pid", getpid()},
{"nrQueuedBuilds", builds.lock()->size()},
{"nrActiveSteps", activeSteps_.lock()->size()},
{"nrStepsBuilding", nrStepsBuilding.load()},
#if 0
{"nrStepsCopyingTo", nrStepsCopyingTo.load()},
{"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
#endif
{"nrUnsupportedSteps", nrUnsupportedSteps.load()},
{"nrBuildsRead", nrBuildsRead.load()},
{"buildReadTimeMs", buildReadTimeMs.load()},
{"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},
{"nrBuildsDone", nrBuildsDone.load()},
{"nrStepsStarted", nrStepsStarted.load()},
{"nrStepsDone", nrStepsDone.load()},
{"nrRetries", nrRetries.load()},
{"maxNrRetries", maxNrRetries.load()},
{"nrQueueWakeups", nrQueueWakeups.load()},
{"nrDispatcherWakeups", nrDispatcherWakeups.load()},
{"dispatchTimeMs", dispatchTimeMs.load()},
{"dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups},
{"nrDbConnections", dbPool.count()},
{"nrActiveDbUpdates", nrActiveDbUpdates.load()},
};
{
JSONObject root(out);
time_t now = time(0);
root.attr("status", "up");
root.attr("time", time(0));
root.attr("uptime", now - startedAt);
root.attr("pid", getpid());
{
auto builds_(builds.lock());
root.attr("nrQueuedBuilds", builds_->size());
}
{
auto steps_(steps.lock());
for (auto i = steps_->begin(); i != steps_->end(); )
if (i->second.lock()) ++i; else i = steps_->erase(i);
root.attr("nrUnfinishedSteps", steps_->size());
statusJson["nrUnfinishedSteps"] = steps_->size();
}
{
auto runnable_(runnable.lock());
for (auto i = runnable_->begin(); i != runnable_->end(); )
if (i->lock()) ++i; else i = runnable_->erase(i);
root.attr("nrRunnableSteps", runnable_->size());
statusJson["nrRunnableSteps"] = runnable_->size();
}
root.attr("nrActiveSteps", activeSteps_.lock()->size());
root.attr("nrStepsBuilding", nrStepsBuilding);
#if 0
root.attr("nrStepsCopyingTo", nrStepsCopyingTo);
root.attr("nrStepsCopyingFrom", nrStepsCopyingFrom);
#endif
root.attr("nrUnsupportedSteps", nrUnsupportedSteps);
root.attr("nrBuildsRead", nrBuildsRead);
root.attr("buildReadTimeMs", buildReadTimeMs);
root.attr("buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead);
root.attr("nrBuildsDone", nrBuildsDone);
root.attr("nrStepsStarted", nrStepsStarted);
root.attr("nrStepsDone", nrStepsDone);
root.attr("nrRetries", nrRetries);
root.attr("maxNrRetries", maxNrRetries);
if (nrStepsDone) {
root.attr("totalStepTime", totalStepTime);
root.attr("totalStepBuildTime", totalStepBuildTime);
root.attr("avgStepTime", (float) totalStepTime / nrStepsDone);
root.attr("avgStepBuildTime", (float) totalStepBuildTime / nrStepsDone);
statusJson["totalStepTime"] = totalStepTime.load();
statusJson["totalStepBuildTime"] = totalStepBuildTime.load();
statusJson["avgStepTime"] = (float) totalStepTime / nrStepsDone;
statusJson["avgStepBuildTime"] = (float) totalStepBuildTime / nrStepsDone;
}
root.attr("nrQueueWakeups", nrQueueWakeups);
root.attr("nrDispatcherWakeups", nrDispatcherWakeups);
root.attr("dispatchTimeMs", dispatchTimeMs);
root.attr("dispatchTimeAvgMs", nrDispatcherWakeups == 0 ? 0.0 : (float) dispatchTimeMs / nrDispatcherWakeups);
root.attr("nrDbConnections", dbPool.count());
root.attr("nrActiveDbUpdates", nrActiveDbUpdates);
{
auto nested = root.object("machines");
auto machines_(machines.lock());
for (auto & i : *machines_) {
auto & m(i.second);
auto & s(m->state);
auto nested2 = nested.object(m->sshName);
nested2.attr("enabled", m->enabled);
{
auto list = nested2.list("systemTypes");
for (auto & s : m->systemTypes)
list.elem(s);
}
{
auto list = nested2.list("supportedFeatures");
for (auto & s : m->supportedFeatures)
list.elem(s);
}
{
auto list = nested2.list("mandatoryFeatures");
for (auto & s : m->mandatoryFeatures)
list.elem(s);
}
nested2.attr("currentJobs", s->currentJobs);
if (s->currentJobs == 0)
nested2.attr("idleSince", s->idleSince);
nested2.attr("nrStepsDone", s->nrStepsDone);
if (m->state->nrStepsDone) {
nested2.attr("totalStepTime", s->totalStepTime);
nested2.attr("totalStepBuildTime", s->totalStepBuildTime);
nested2.attr("avgStepTime", (float) s->totalStepTime / s->nrStepsDone);
nested2.attr("avgStepBuildTime", (float) s->totalStepBuildTime / s->nrStepsDone);
}
auto info(m->state->connectInfo.lock());
nested2.attr("disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil));
nested2.attr("lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure));
nested2.attr("consecutiveFailures", info->consecutiveFailures);
json machine = {
{"enabled", m->enabled},
{"systemTypes", m->systemTypes},
{"supportedFeatures", m->supportedFeatures},
{"mandatoryFeatures", m->mandatoryFeatures},
{"nrStepsDone", s->nrStepsDone.load()},
{"currentJobs", s->currentJobs.load()},
{"disabledUntil", std::chrono::system_clock::to_time_t(info->disabledUntil)},
{"lastFailure", std::chrono::system_clock::to_time_t(info->lastFailure)},
{"consecutiveFailures", info->consecutiveFailures},
};
if (s->currentJobs == 0)
machine["idleSince"] = s->idleSince.load();
if (m->state->nrStepsDone) {
machine["totalStepTime"] = s->totalStepTime.load();
machine["totalStepBuildTime"] = s->totalStepBuildTime.load();
machine["avgStepTime"] = (float) s->totalStepTime / s->nrStepsDone;
machine["avgStepBuildTime"] = (float) s->totalStepBuildTime / s->nrStepsDone;
}
statusJson["machines"][m->sshName] = machine;
}
}
{
auto nested = root.object("jobsets");
auto jobsets_json = json::object();
auto jobsets_(jobsets.lock());
for (auto & jobset : *jobsets_) {
auto nested2 = nested.object(jobset.first.first + ":" + jobset.first.second);
nested2.attr("shareUsed", jobset.second->shareUsed());
nested2.attr("seconds", jobset.second->getSeconds());
jobsets_json[jobset.first.first + ":" + jobset.first.second] = {
{"shareUsed", jobset.second->shareUsed()},
{"seconds", jobset.second->getSeconds()},
};
}
statusJson["jobsets"] = jobsets_json;
}
{
auto nested = root.object("machineTypes");
auto machineTypesJson = json::object();
auto machineTypes_(machineTypes.lock());
for (auto & i : *machineTypes_) {
auto nested2 = nested.object(i.first);
nested2.attr("runnable", i.second.runnable);
nested2.attr("running", i.second.running);
auto machineTypeJson = machineTypesJson[i.first] = {
{"runnable", i.second.runnable},
{"running", i.second.running},
};
if (i.second.runnable > 0)
nested2.attr("waitTime", i.second.waitTime.count() +
i.second.runnable * (time(0) - lastDispatcherCheck));
machineTypeJson["waitTime"] = i.second.waitTime.count() +
i.second.runnable * (time(0) - lastDispatcherCheck);
if (i.second.running == 0)
nested2.attr("lastActive", std::chrono::system_clock::to_time_t(i.second.lastActive));
machineTypeJson["lastActive"] = std::chrono::system_clock::to_time_t(i.second.lastActive);
}
statusJson["machineTypes"] = machineTypesJson;
}
auto store = getDestStore();
auto nested = root.object("store");
auto & stats = store->getStats();
nested.attr("narInfoRead", stats.narInfoRead);
nested.attr("narInfoReadAverted", stats.narInfoReadAverted);
nested.attr("narInfoMissing", stats.narInfoMissing);
nested.attr("narInfoWrite", stats.narInfoWrite);
nested.attr("narInfoCacheSize", stats.pathInfoCacheSize);
nested.attr("narRead", stats.narRead);
nested.attr("narReadBytes", stats.narReadBytes);
nested.attr("narReadCompressedBytes", stats.narReadCompressedBytes);
nested.attr("narWrite", stats.narWrite);
nested.attr("narWriteAverted", stats.narWriteAverted);
nested.attr("narWriteBytes", stats.narWriteBytes);
nested.attr("narWriteCompressedBytes", stats.narWriteCompressedBytes);
nested.attr("narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs);
nested.attr("narCompressionSavings",
stats.narWriteBytes
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
: 0.0);
nested.attr("narCompressionSpeed", // MiB/s
statusJson["store"] = {
{"narInfoRead", stats.narInfoRead.load()},
{"narInfoReadAverted", stats.narInfoReadAverted.load()},
{"narInfoMissing", stats.narInfoMissing.load()},
{"narInfoWrite", stats.narInfoWrite.load()},
{"narInfoCacheSize", stats.pathInfoCacheSize.load()},
{"narRead", stats.narRead.load()},
{"narReadBytes", stats.narReadBytes.load()},
{"narReadCompressedBytes", stats.narReadCompressedBytes.load()},
{"narWrite", stats.narWrite.load()},
{"narWriteAverted", stats.narWriteAverted.load()},
{"narWriteBytes", stats.narWriteBytes.load()},
{"narWriteCompressedBytes", stats.narWriteCompressedBytes.load()},
{"narWriteCompressionTimeMs", stats.narWriteCompressionTimeMs.load()},
{"narCompressionSavings",
stats.narWriteBytes
? 1.0 - (double) stats.narWriteCompressedBytes / stats.narWriteBytes
: 0.0},
{"narCompressionSpeed", // MiB/s
stats.narWriteCompressionTimeMs
? (double) stats.narWriteBytes / stats.narWriteCompressionTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0);
: 0.0},
};
auto s3Store = dynamic_cast<S3BinaryCacheStore *>(&*store);
if (s3Store) {
auto nested2 = nested.object("s3");
auto & s3Stats = s3Store->getS3Stats();
nested2.attr("put", s3Stats.put);
nested2.attr("putBytes", s3Stats.putBytes);
nested2.attr("putTimeMs", s3Stats.putTimeMs);
nested2.attr("putSpeed",
s3Stats.putTimeMs
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0);
nested2.attr("get", s3Stats.get);
nested2.attr("getBytes", s3Stats.getBytes);
nested2.attr("getTimeMs", s3Stats.getTimeMs);
nested2.attr("getSpeed",
s3Stats.getTimeMs
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0);
nested2.attr("head", s3Stats.head);
nested2.attr("costDollarApprox",
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+ s3Stats.put / 1000.0 * 0.005 +
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09);
auto jsonS3 = statusJson["s3"] = {
{"put", s3Stats.put.load()},
{"putBytes", s3Stats.putBytes.load()},
{"putTimeMs", s3Stats.putTimeMs.load()},
{"putSpeed",
s3Stats.putTimeMs
? (double) s3Stats.putBytes / s3Stats.putTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0},
{"get", s3Stats.get.load()},
{"getBytes", s3Stats.getBytes.load()},
{"getTimeMs", s3Stats.getTimeMs.load()},
{"getSpeed",
s3Stats.getTimeMs
? (double) s3Stats.getBytes / s3Stats.getTimeMs * 1000.0 / (1024.0 * 1024.0)
: 0.0},
{"head", s3Stats.head.load()},
{"costDollarApprox",
(s3Stats.get + s3Stats.head) / 10000.0 * 0.004
+ s3Stats.put / 1000.0 * 0.005 +
+ s3Stats.getBytes / (1024.0 * 1024.0 * 1024.0) * 0.09},
};
}
}
@@ -724,7 +705,7 @@ void State::dumpStatus(Connection & conn)
pqxx::work txn(conn);
// FIXME: use PostgreSQL 9.5 upsert.
txn.exec("delete from SystemStatus where what = 'queue-runner'");
txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", out.str());
txn.exec_params0("insert into SystemStatus values ('queue-runner', $1)", statusJson.dump());
txn.exec("notify status_dumped");
txn.commit();
}
@@ -949,7 +930,6 @@ int main(int argc, char * * argv)
});
settings.verboseBuild = true;
settings.lockCPU = false;
State state{metricsAddrOpt};
if (status)


@@ -24,13 +24,13 @@ struct Extractor : ParseSink
void createDirectory(const Path & path) override
{
members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tDirectory });
members.insert_or_assign(prefix + path, NarMemberData { .type = SourceAccessor::Type::tDirectory });
}
void createRegularFile(const Path & path) override
{
curMember = &members.insert_or_assign(prefix + path, NarMemberData {
.type = FSAccessor::Type::tRegular,
.type = SourceAccessor::Type::tRegular,
.fileSize = 0,
.contents = filesToKeep.count(path) ? std::optional("") : std::nullopt,
}).first->second;
@@ -66,8 +66,14 @@ struct Extractor : ParseSink
void createSymlink(const Path & path, const std::string & target) override
{
members.insert_or_assign(prefix + path, NarMemberData { .type = FSAccessor::Type::tSymlink });
members.insert_or_assign(prefix + path, NarMemberData { .type = SourceAccessor::Type::tSymlink });
}
void isExecutable() override
{ }
void closeRegularFile() override
{ }
};


@@ -1,13 +1,13 @@
#pragma once
#include "fs-accessor.hh"
#include "source-accessor.hh"
#include "types.hh"
#include "serialise.hh"
#include "hash.hh"
struct NarMemberData
{
nix::FSAccessor::Type type;
nix::SourceAccessor::Type type;
std::optional<uint64_t> fileSize;
std::optional<std::string> contents;
std::optional<nix::Hash> sha256;


@@ -13,7 +13,7 @@ void State::queueMonitor()
try {
queueMonitorLoop();
} catch (std::exception & e) {
printMsg(lvlError, format("queue monitor: %1%") % e.what());
printError("queue monitor: %s", e.what());
sleep(10); // probably a DB problem, so don't retry right away
}
}
@@ -142,13 +142,13 @@ bool State::getQueuedBuilds(Connection & conn,
createBuild = [&](Build::ptr build) {
prom.queue_build_loads.Increment();
printMsg(lvlTalkative, format("loading build %1% (%2%)") % build->id % build->fullJobName());
printMsg(lvlTalkative, "loading build %1% (%2%)", build->id, build->fullJobName());
nrAdded++;
newBuildsByID.erase(build->id);
if (!localStore->isValidPath(build->drvPath)) {
/* Derivation has been GC'ed prematurely. */
printMsg(lvlError, format("aborting GC'ed build %1%") % build->id);
printError("aborting GC'ed build %1%", build->id);
if (!build->finishedInDB) {
auto mc = startDbUpdate();
pqxx::work txn(conn);
@@ -302,7 +302,7 @@ bool State::getQueuedBuilds(Connection & conn,
/* Add the new runnable build steps to runnable and wake up
the builder threads. */
printMsg(lvlChatty, format("got %1% new runnable steps from %2% new builds") % newRunnable.size() % nrAdded);
printMsg(lvlChatty, "got %1% new runnable steps from %2% new builds", newRunnable.size(), nrAdded);
for (auto & r : newRunnable)
makeRunnable(r);
@@ -315,7 +315,7 @@ bool State::getQueuedBuilds(Connection & conn,
if (std::chrono::system_clock::now() > start + std::chrono::seconds(600)) {
prom.queue_checks_early_exits.Increment();
break;
}
}
}
prom.queue_checks_finished.Increment();
@@ -358,13 +358,13 @@ void State::processQueueChange(Connection & conn)
for (auto i = builds_->begin(); i != builds_->end(); ) {
auto b = currentIds.find(i->first);
if (b == currentIds.end()) {
printMsg(lvlInfo, format("discarding cancelled build %1%") % i->first);
printInfo("discarding cancelled build %1%", i->first);
i = builds_->erase(i);
// FIXME: ideally we would interrupt active build steps here.
continue;
}
if (i->second->globalPriority < b->second) {
printMsg(lvlInfo, format("priority of build %1% increased") % i->first);
printInfo("priority of build %1% increased", i->first);
i->second->globalPriority = b->second;
i->second->propagatePriorities();
}
@@ -561,7 +561,7 @@ Step::ptr State::createStep(ref<Store> destStore,
printMsg(lvlDebug, "creating build step %1%", localStore->printStorePath(drvPath));
/* Create steps for the dependencies. */
for (auto & i : step->drv->inputDrvs) {
for (auto & i : step->drv->inputDrvs.map) {
auto dep = createStep(destStore, conn, build, i.first, 0, step, finishedDrvs, newSteps, newRunnable);
if (dep) {
auto step_(step->state.lock());
@@ -654,7 +654,7 @@ BuildOutput State::getBuildOutputCached(Connection & conn, nix::ref<nix::Store>
if (r.empty()) continue;
BuildID id = r[0][0].as<BuildID>();
printMsg(lvlInfo, format("reusing build %d") % id);
printInfo("reusing build %d", id);
BuildOutput res;
res.failed = r[0][1].as<int>() == bsFailedWithOutput;


@@ -216,8 +216,8 @@ sub scmdiff : Path('/api/scmdiff') Args(0) {
} elsif ($type eq "git") {
my $clonePath = getSCMCacheDir . "/git/" . sha256_hex($uri);
die if ! -d $clonePath;
$diff .= `(cd $clonePath; git log $rev1..$rev2)`;
$diff .= `(cd $clonePath; git diff $rev1..$rev2)`;
$diff .= `(cd $clonePath; git --git-dir .git log $rev1..$rev2)`;
$diff .= `(cd $clonePath; git --git-dir .git diff $rev1..$rev2)`;
}
$c->stash->{'plain'} = { data => (scalar $diff) || " " };


@@ -238,9 +238,17 @@ sub serveFile {
"store", "cat", "--store", getStoreUri(), "$path"]) };
# Detect MIME type.
state $magic = File::LibMagic->new(follow_symlinks => 1);
my $info = $magic->info_from_filename($path);
my $type = $info->{mime_with_encoding};
my $type = "text/plain";
if ($path =~ /.*\.(\S{1,})$/xms) {
my $ext = $1;
my $mimeTypes = MIME::Types->new(only_complete => 1);
my $t = $mimeTypes->mimeTypeOf($ext);
$type = ref $t ? $t->type : $t if $t;
} else {
state $magic = File::LibMagic->new(follow_symlinks => 1);
my $info = $magic->info_from_filename($path);
$type = $info->{mime_with_encoding};
}
$c->response->content_type($type);
$c->forward('Hydra::View::Plain');
}


@@ -463,7 +463,7 @@ sub my_jobs_tab :Chained('dashboard_base') :PathPart('my-jobs-tab') :Args(0) {
, "jobset.enabled" => 1
},
{ order_by => ["project", "jobset", "job"]
, join => ["project", "jobset"]
, join => {"jobset" => "project"}
})];
}


@@ -216,7 +216,7 @@ sub json_hint {
sub _authenticator() {
my $authenticator = Crypt::Passphrase->new(
encoder => 'Argon2',
encoder => { module => 'Argon2', output_size => 16 },
validators => [
(sub {
my ($password, $hash) = @_;


@@ -2,6 +2,7 @@
#include <pqxx/pqxx>
#include "environment-variables.hh"
#include "util.hh"


@@ -2,6 +2,7 @@
#include <map>
#include "file-system.hh"
#include "util.hh"
struct HydraConfig


@@ -82,7 +82,7 @@
function onGoogleSignIn(googleUser) {
requestJSON({
url: "[% c.uri_for('/google-login') %]",
data: "id_token=" + googleUser.getAuthResponse().id_token,
data: "id_token=" + googleUser.credential,
type: 'POST',
success: function(data) {
window.location.reload();
@@ -91,9 +91,6 @@
return false;
};
$("#google-signin").click(function() {
$(".g-signin2:first-child > div").click();
});
</script>
[% END %]


@@ -374,7 +374,7 @@ BLOCK renderInputDiff; %]
[% ELSIF bi1.uri == bi2.uri && bi1.revision != bi2.revision %]
[% IF bi1.type == "git" %]
<tr><td>
<b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 6) _ ' to ' _ bi2.revision.substr(0, 6)) %]</tt>
<b>[% bi1.name %]</b></td><td><tt>[% INCLUDE renderDiffUri contents=(bi1.revision.substr(0, 8) _ ' to ' _ bi2.revision.substr(0, 8)) %]</tt>
</td></tr>
[% ELSE %]
<tr><td>


@@ -133,8 +133,10 @@
[% ELSE %]
[% WRAPPER makeSubMenu title="Sign in" id="sign-in-menu" align="right" %]
[% IF c.config.enable_google_login %]
<div style="display: none" class="g-signin2" data-onsuccess="onGoogleSignIn" data-theme="dark"></div>
<a class="dropdown-item" href="#" id="google-signin">Sign in with Google</a>
<script src="https://accounts.google.com/gsi/client" async defer></script>
<div id="g_id_onload" data-client_id="[% c.config.google_client_id %]" data-callback="onGoogleSignIn">
</div>
<div class="g_id_signin" data-type="standard"></div>
<div class="dropdown-divider"></div>
[% END %]
[% IF c.config.github_client_id %]

src/sql/upgrade-83.sql

@@ -0,0 +1,3 @@
-- This index was introduced in a migration but was never recorded in
-- hydra.sql (the source of truth), which is why `if exists` is required.
drop index if exists IndexBuildOutputsOnPath;


@@ -0,0 +1,30 @@
use strict;
use warnings;
use Setup;
my $ctx = test_context();
use HTTP::Request::Common;
use Test2::V0;
use Catalyst::Test ();
Catalyst::Test->import('Hydra');
require Hydra::Schema;
require Hydra::Model::DB;
my $db = $ctx->db();
my $user = $db->resultset('Users')->create({ username => 'alice', emailaddress => 'alice@invalid.org', password => '!' });
$user->setPassword('foobar');
my $builds = $ctx->makeAndEvaluateJobset(
expression => "basic.nix",
build => 1
);
my $login = request(POST '/login', Referer => 'http://localhost', Content => {
username => 'alice',
password => 'foobar',
});
is($login->code, 302);
my $cookie = $login->header("set-cookie");
my $my_jobs = request(GET '/dashboard/alice/my-jobs-tab', Accept => 'application/json', Cookie => $cookie);
ok($my_jobs->is_success);
my $content = $my_jobs->content();
ok($content =~ /empty_dir/);
ok(!($content =~ /fails/));
ok(!($content =~ /succeed_with_failed/));
done_testing;


@@ -57,8 +57,8 @@ subtest "Validate a run log was created" => sub {
ok($runlog->did_succeed(), "The process did succeed.");
is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*");
is($runlog->command, 'cp "$HYDRA_JSON" "$HYDRA_DATA/joboutput.json"', "The executed command is saved.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is also recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is also recent.");
is($runlog->exit_code, 0, "This command should have succeeded.");
subtest "Validate the run log file exists" => sub {


@@ -43,8 +43,8 @@ subtest "Validate a run log was created" => sub {
ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error.");
is($runlog->job_matcher, "*:*:*", "An unspecified job matcher is defaulted to *:*:*");
is($runlog->command, 'invalid-command-this-does-not-exist', "The executed command is saved.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is also recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is also recent.");
is($runlog->exit_code, undef, "This command should not have executed.");
is($runlog->error_number, 2, "This command failed to exec.");
};


@@ -55,7 +55,7 @@ subtest "Starting a process" => sub {
ok($runlog->is_running(), "The process is running.");
ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, undef, "The end time is undefined.");
is($runlog->exit_code, undef, "The exit code is undefined.");
is($runlog->signal, undef, "The signal is undefined.");
@@ -70,8 +70,8 @@ subtest "The process completed (success)" => sub {
ok(!$runlog->is_running(), "The process is not running.");
ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
is($runlog->error_number, undef, "The error number is undefined");
is($runlog->exit_code, 0, "The exit code is 0.");
is($runlog->signal, undef, "The signal is undefined.");
@@ -86,8 +86,8 @@ subtest "The process completed (errored)" => sub {
ok(!$runlog->is_running(), "The process is not running.");
ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
is($runlog->error_number, undef, "The error number is undefined");
is($runlog->exit_code, 85, "The exit code is 85.");
is($runlog->signal, undef, "The signal is undefined.");
@@ -102,8 +102,8 @@ subtest "The process completed (status 15, child error 0)" => sub {
ok(!$runlog->is_running(), "The process is not running.");
ok($runlog->did_fail_with_signal(), "The process was killed by a signal.");
ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
is($runlog->error_number, undef, "The error number is undefined");
is($runlog->exit_code, undef, "The exit code is undefined.");
is($runlog->signal, 15, "Signal 15 was sent.");
@@ -118,8 +118,8 @@ subtest "The process completed (signaled)" => sub {
ok(!$runlog->is_running(), "The process is not running.");
ok($runlog->did_fail_with_signal(), "The process was killed by a signal.");
ok(!$runlog->did_fail_with_exec_error(), "The process did not fail to start due to an exec error.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
is($runlog->error_number, undef, "The error number is undefined");
is($runlog->exit_code, undef, "The exit code is undefined.");
is($runlog->signal, 9, "The signal is 9.");
@@ -134,8 +134,8 @@ subtest "The process failed to start" => sub {
ok(!$runlog->is_running(), "The process is running.");
ok(!$runlog->did_fail_with_signal(), "The process was not killed by a signal.");
ok($runlog->did_fail_with_exec_error(), "The process failed to start due to an exec error.");
is($runlog->start_time, within(time() - 1, 2), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 2), "The end time is recent.");
is($runlog->start_time, within(time() - 1, 5), "The start time is recent.");
is($runlog->end_time, within(time() - 1, 5), "The end time is recent.");
is($runlog->error_number, 2, "The error number is saved");
is($runlog->exit_code, undef, "The exit code is undefined.");
is($runlog->signal, undef, "The signal is undefined.");


@@ -25,11 +25,11 @@ subtest "requeue" => sub {
$task->requeue();
is($task->attempts, 2, "We should have stored a second retry");
is($task->retry_at, within(time() + 4, 2), "Delayed two exponential backoff step");
is($task->retry_at, within(time() + 4, 5), "Delayed two exponential backoff step");
$task->requeue();
is($task->attempts, 3, "We should have stored a third retry");
is($task->retry_at, within(time() + 8, 2), "Delayed a third exponential backoff step");
is($task->retry_at, within(time() + 8, 5), "Delayed a third exponential backoff step");
};
done_testing;


@@ -101,7 +101,7 @@ subtest "save_task" => sub {
is($retry->pluginname, "FooPluginName", "Plugin name should match");
is($retry->payload, "1", "Payload should match");
is($retry->attempts, 1, "We've had one attempt");
is($retry->retry_at, within(time() + 1, 2), "The retry at should be approximately one second away");
is($retry->retry_at, within(time() + 1, 5), "The retry at should be approximately one second away");
};
done_testing;


@@ -4,6 +4,8 @@ with import ./config.nix;
mkDerivation {
name = "empty-dir";
builder = ./empty-dir-builder.sh;
meta.maintainers = [ "alice@invalid.org" ];
meta.outPath = "${placeholder "out"}";
};
fails =