Compare commits

..

5 Commits

Author SHA1 Message Date
John Ericson
f3dc4e9228 WIP ssh-ng:// 2025-02-14 18:51:49 -05:00
John Ericson
45d075e5db WIP: Avoid custom logic copying outputs from the remote builder
We need a replacement for the nar member logic, however. And maybe also
a test that fails until this is fixed (this one should not be passing).
2025-02-14 18:50:38 -05:00
John Ericson
9e162dcf52 Avoid custom logic to copy inputs to the remote builder 2025-02-14 18:50:38 -05:00
John Ericson
4c173daec7 Use LegacySSHStore
In https://github.com/NixOS/nix/pull/10748 it is extended with
everything we need.
2025-02-14 18:48:46 -05:00
John Ericson
8675aee25b WIP TEMP nix update, don't merge this!
Not until https://github.com/NixOS/nix/pull/10748 lands

Flake lock file updates:

• Updated input 'nix':
    'github:NixOS/nix/970942f45836172fda410a638853382952189eb9?narHash=sha256-jGFuyYKJjJZsBRoi7ZcaVKt1OYxusz/ld1HA7VD2w/0%3D' (2025-02-12)
  → 'github:NixOS/nix/5eade4825221d3284fc6555cb20de2c7aa171d72?narHash=sha256-n5kdS1C24tlJxDV6Wm1iBlyvGk%2Bp0gMXRcWVCAipYLs%3D' (2025-02-14)

• Updated input 'nix-eval-jobs':
    'github:Ericson2314/nix-eval-jobs/5e27c2724a4b07862e7ff1a198aa2ed68dea3e2c?narHash=sha256-7xgSdKnQW11eWd59MnpUNS%2BgwgtOJH2ShzLwByev3rg%3D' (2025-02-14)
  → 'github:Ericson2314/nix-eval-jobs/de345eb4518d952c2d86261b270f2c31edecd3de?narHash=sha256-dNMJY6%2BG3PwE8lIAhwetPJdA2DxCEKRXPY/EtHmdDh4%3D' (2025-02-14)
2025-02-14 18:15:22 -05:00
12 changed files with 310 additions and 330 deletions

20
flake.lock generated
View File

@@ -5,6 +5,7 @@
"flake-compat": [],
"flake-parts": [],
"git-hooks-nix": [],
"nixfmt": [],
"nixpkgs": [
"nixpkgs"
],
@@ -12,16 +13,16 @@
"nixpkgs-regression": []
},
"locked": {
"lastModified": 1739899400,
"narHash": "sha256-q/RgA4bB7zWai4oPySq9mch7qH14IEeom2P64SXdqHs=",
"lastModified": 1739571938,
"narHash": "sha256-NlaLAed/xei6RWpU2HIIbDjILRC4l1NIfGeyrn7ALQs=",
"owner": "NixOS",
"repo": "nix",
"rev": "e310c19a1aeb1ce1ed4d41d5ab2d02db596e0918",
"rev": "ffc649d2eabdd3e678b5bcc211dd59fd06debf3e",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "2.26-maintenance",
"ref": "ssh-ng-extensions-for-hydra",
"repo": "nix",
"type": "github"
}
@@ -29,15 +30,16 @@
"nix-eval-jobs": {
"flake": false,
"locked": {
"lastModified": 1739500569,
"narHash": "sha256-3wIReAqdTALv39gkWXLMZQvHyBOc3yPkWT2ZsItxedY=",
"owner": "nix-community",
"lastModified": 1739499741,
"narHash": "sha256-dNMJY6+G3PwE8lIAhwetPJdA2DxCEKRXPY/EtHmdDh4=",
"owner": "Ericson2314",
"repo": "nix-eval-jobs",
"rev": "4b392b284877d203ae262e16af269f702df036bc",
"rev": "de345eb4518d952c2d86261b270f2c31edecd3de",
"type": "github"
},
"original": {
"owner": "nix-community",
"owner": "Ericson2314",
"ref": "nix-2.27",
"repo": "nix-eval-jobs",
"type": "github"
}

View File

@@ -4,7 +4,7 @@
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11-small";
inputs.nix = {
url = "github:NixOS/nix/2.26-maintenance";
url = "github:NixOS/nix/ssh-ng-extensions-for-hydra";
inputs.nixpkgs.follows = "nixpkgs";
# hide nix dev tooling from our lock file
@@ -13,10 +13,11 @@
inputs.nixpkgs-regression.follows = "";
inputs.nixpkgs-23-11.follows = "";
inputs.flake-compat.follows = "";
inputs.nixfmt.follows = "";
};
inputs.nix-eval-jobs = {
url = "github:nix-community/nix-eval-jobs";
url = "github:Ericson2314/nix-eval-jobs/nix-2.27";
# We want to control the deps precisely
flake = false;
};
@@ -85,7 +86,6 @@
nix-util
nix-store
nix-main
nix-cmd
nix-cli
;
nix-perl-bindings = nix.hydraJobs.perlBindings.${system};

View File

@@ -1,6 +1,6 @@
#!/bin/sh
exec mdbook serve \
mdbook serve \
--port 63332 \
--dest-dir ./.hydra-data/manual \
./doc/manual/

View File

@@ -11,7 +11,6 @@
, nix-util
, nix-store
, nix-main
, nix-cmd
, nix-cli
, nix-perl-bindings
, git
@@ -179,7 +178,6 @@ stdenv.mkDerivation (finalAttrs: {
nix-util
nix-store
nix-main
nix-cmd
perlDeps
perl
boost

View File

@@ -1,213 +0,0 @@
/* This is a helper program that performs a build step, i.e. a single
derivation. In addition to a derivation path, it takes three store
URLs as arguments:
* --store: The store that will hold the resulting store paths
(typically a binary cache).
* --eval-store: The store that holds the .drv files, as produced by
hydra-evaluator.
* --build-store: The store that performs the build (often a
SSHStore for remote builds).
The build log is written to the path indicated by --log-file.
*/
#include "util.hh"
#include "shared.hh"
#include "common-eval-args.hh"
#include "store-api.hh"
#include "build-result.hh"
#include "derivations.hh"
#include "worker-protocol.hh"
#include <chrono>
using namespace nix;
// FIXME: cut&paste
static std::string_view getS(const std::vector<Logger::Field> & fields, size_t n)
{
assert(n < fields.size());
assert(fields[n].type == Logger::Field::tString);
return fields[n].s;
}
void mainWrapped(std::list<std::string> args)
{
verbosity = lvlError;
struct MyArgs : MixEvalArgs, MixCommonArgs, RootArgs
{
Path drvPath;
std::optional<std::string> buildStoreUrl;
std::optional<Path> logPath;
std::optional<uint64_t> maxOutputSize;
MyArgs() : MixCommonArgs("hydra-build-step")
{
expectArg("drv-path", &drvPath);
addFlag({
.longName = "build-store",
.description = "The Nix store to use for building the derivation.",
//.category = category,
.labels = {"store-url"},
.handler = {&buildStoreUrl},
});
addFlag({
.longName = "log-file",
.description = "The path to the build log.",
.labels = {"path"},
.handler = {&logPath},
});
addFlag({
.longName = "max-output-size",
.description = "Maximum size of the outputs.",
.labels = {"bytes"},
.handler = {&maxOutputSize},
});
}
};
/* A logger that intercepts all build log lines and writes them to
the log file. */
MyArgs myArgs;
myArgs.parseCmdline(args);
struct MyLogger : public Logger
{
Logger & prev;
AutoCloseFD logFile;
MyLogger(Logger & prev, Path logPath) : prev(prev)
{
logFile = open(logPath.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
if (!logFile)
throw SysError("creating log file '%s'", logPath);
}
void log(Verbosity lvl, std::string_view s) override
{ prev.log(lvl, s); }
void logEI(const ErrorInfo & ei) override
{ prev.logEI(ei); }
void writeToStdout(std::string_view s) override
{ prev.writeToStdout(s); }
void result(ActivityId act, ResultType type, const Fields & fields) override
{
if (type == resBuildLogLine)
writeLine(logFile.get(), std::string(getS(fields, 0)));
else
prev.result(act, type, fields);
}
};
auto destStore = openStore();
auto evalStore = myArgs.evalStoreUrl ? openStore(*myArgs.evalStoreUrl) : destStore;
auto buildStore = myArgs.buildStoreUrl ? openStore(*myArgs.buildStoreUrl) : destStore;
auto drvPath = evalStore->parseStorePath(myArgs.drvPath);
auto drv = evalStore->readDerivation(drvPath);
BasicDerivation basicDrv(drv);
uint64_t overhead = 0;
/* Gather the inputs. */
StorePathSet inputs;
for (auto & p : drv.inputSrcs)
inputs.insert(p);
for (auto & [drvPath, node] : drv.inputDrvs.map) {
auto drv2 = evalStore->readDerivation(drvPath);
for (auto & name : node.value) {
if (auto i = get(drv2.outputs, name)) {
auto outPath = i->path(*evalStore, drv2.name, name);
inputs.insert(*outPath);
basicDrv.inputSrcs.insert(*outPath);
}
}
}
/* Ensure that the inputs exist in the destination store (so that
the builder can substitute them from the destination
store). This is a no-op for regular stores, but for the binary
cache store, this will copy the inputs to the binary cache from
the local store. */
{
auto now1 = std::chrono::steady_clock::now();
debug("sending closure of '%s' to '%s'",
evalStore->printStorePath(drvPath), destStore->getUri());
if (evalStore != destStore)
copyClosure(*evalStore, *destStore, drv.inputSrcs, NoRepair, NoCheckSigs);
copyClosure(*destStore, *buildStore, inputs, NoRepair, NoCheckSigs, Substitute);
auto now2 = std::chrono::steady_clock::now();
overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
}
/* Perform the build. */
if (myArgs.logPath)
logger = new MyLogger(*logger, *myArgs.logPath);
auto buildResult = buildStore->buildDerivation(drvPath, basicDrv);
/* Copy the output paths from the build store to the destination
store. */
size_t totalNarSize = 0;
if (buildResult.success()) {
std::map<StorePath, ValidPathInfo> infos;
StorePathSet outputs;
for (auto & [output, realisation] : buildResult.builtOutputs) {
auto info = buildStore->queryPathInfo(realisation.outPath);
totalNarSize += info->narSize;
infos.insert_or_assign(info->path, *info);
outputs.insert(info->path);
}
if ((!myArgs.maxOutputSize || totalNarSize <= *myArgs.maxOutputSize)
&& buildStore != destStore)
{
debug("copying outputs of '%s' from '%s' (%d bytes)",
buildStore->printStorePath(drvPath), buildStore->getUri(), totalNarSize);
auto now1 = std::chrono::steady_clock::now();
copyPaths(*buildStore, *destStore, outputs, NoRepair, NoCheckSigs);
auto now2 = std::chrono::steady_clock::now();
overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
}
}
FdSink to { STDOUT_FILENO };
WorkerProto::WriteConn wconn {
.to = to,
// Hardcode latest version because we are deploying hydra
// itself atomically
.version = PROTOCOL_VERSION,
};
WorkerProto::write(*evalStore, wconn, buildResult);
}
int main(int argc, char * * argv)
{
return handleExceptions(argv[0], [&]() {
initNix();
mainWrapped(argvToStrings(argc, argv));
});
}

View File

@@ -1,14 +0,0 @@
srcs = files(
'hydra-build-step.cc',
)
hydra_build_step = executable('hydra-build-step',
'hydra-build-step.cc',
srcs,
dependencies: [
libhydra_dep,
nix_dep,
dependency('nix-cmd', required: true)
],
install: true,
)

View File

@@ -1,27 +1,178 @@
#include <math.h>
#include <algorithm>
#include <cmath>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "build-result.hh"
#include "path.hh"
#include "ssh-store.hh"
#include "serve-protocol.hh"
#include "state.hh"
#include "current-process.hh"
#include "processes.hh"
#include "util.hh"
#include "ssh.hh"
#include "finally.hh"
#include "url.hh"
#include "worker-protocol.hh"
using namespace nix;
bool ::Machine::isLocalhost() const
{
return storeUri.params.empty() && std::visit(overloaded {
[](const StoreReference::Auto &) {
return true;
},
[](const StoreReference::Specified & s) {
return
(s.scheme == "local" || s.scheme == "unix") ||
((s.scheme == "ssh" || s.scheme == "ssh-ng") &&
s.authority == "localhost");
},
}, storeUri.variant);
}
namespace nix::build_remote {
static Path createLogFileDir(const std::string & logDir, const StorePath & drvPath)
static std::pair<Path, AutoCloseFD> openLogFile(const std::string & logDir, const StorePath & drvPath)
{
std::string base(drvPath.to_string());
auto logFile = logDir + "/" + std::string(base, 0, 2) + "/" + std::string(base, 2);
createDirs(dirOf(logFile));
return logFile;
AutoCloseFD logFD = open(logFile.c_str(), O_CREAT | O_TRUNC | O_WRONLY, 0666);
if (!logFD) throw SysError("creating log file %s", logFile);
return {std::move(logFile), std::move(logFD)};
}
static BasicDerivation sendInputs(
State & state,
Step & step,
Store & localStore,
Store & destStore,
::Machine::Connection & conn,
unsigned int & overhead,
counter & nrStepsWaiting,
counter & nrStepsCopyingTo
)
{
/* Replace the input derivations by their output paths to send a
minimal closure to the builder.
`tryResolve` currently does *not* rewrite input addresses, so it
is safe to do this in all cases. (It should probably have a mode
to do that, however, but we would not use it here.)
*/
BasicDerivation basicDrv = ({
auto maybeBasicDrv = step.drv->tryResolve(destStore, &localStore);
if (!maybeBasicDrv)
throw Error(
"the derivation '%s' can't be resolved. It's probably "
"missing some outputs",
localStore.printStorePath(step.drvPath));
*maybeBasicDrv;
});
/* Ensure that the inputs exist in the destination store. This is
a no-op for regular stores, but for the binary cache store,
this will copy the inputs to the binary cache from the local
store. */
if (&localStore != &destStore) {
copyClosure(localStore, destStore,
step.drv->inputSrcs,
NoRepair, NoCheckSigs, NoSubstitute);
}
{
auto mc1 = std::make_shared<MaintainCount<counter>>(nrStepsWaiting);
mc1.reset();
MaintainCount<counter> mc2(nrStepsCopyingTo);
printMsg(lvlDebug, "sending closure of %s to %s",
localStore.printStorePath(step.drvPath), conn.machine->storeUri.render());
auto now1 = std::chrono::steady_clock::now();
/* Copy the input closure. */
copyClosure(
destStore,
conn.machine->isLocalhost() ? localStore : *conn.store,
basicDrv.inputSrcs,
NoRepair,
NoCheckSigs,
Substitute);
auto now2 = std::chrono::steady_clock::now();
overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
}
return basicDrv;
}
static BuildResult performBuild(
::Machine::Connection & conn,
Store & localStore,
StorePath drvPath,
const BasicDerivation & drv,
counter & nrStepsBuilding
)
{
auto kont = conn.store->buildDerivationAsync(drvPath, drv, bmNormal);
BuildResult result;
time_t startTime, stopTime;
startTime = time(0);
{
MaintainCount<counter> mc(nrStepsBuilding);
result = kont();
// Without proper call-once functions, we need to manually
// delete after calling.
kont = {};
}
stopTime = time(0);
if (!result.startTime) {
// If the builder gave `startTime = 0`, use our measurements
// instead of the builder's.
//
// Note: this represents the duration of a single round, rather
// than all rounds.
result.startTime = startTime;
result.stopTime = stopTime;
}
// If the protocol was too old to give us `builtOutputs`, initialize
// it manually by introspecting the derivation.
if (GET_PROTOCOL_MINOR(conn.store->getProtocol()) < 6)
{
// If the remote is too old to handle CA derivations, we can't get this
// far anyways
assert(drv.type().hasKnownOutputPaths());
DerivationOutputsAndOptPaths drvOutputs = drv.outputsAndOptPaths(localStore);
// Since this a `BasicDerivation`, `staticOutputHashes` will not
// do any real work.
auto outputHashes = staticOutputHashes(localStore, drv);
for (auto & [outputName, output] : drvOutputs) {
auto outputPath = output.second;
// We've just asserted that the output paths of the derivation
// were known
assert(outputPath);
auto outputHash = outputHashes.at(outputName);
auto drvOutput = DrvOutput { outputHash, outputName };
result.builtOutputs.insert_or_assign(
std::move(outputName),
Realisation { drvOutput, *outputPath });
}
}
return result;
}
}
@@ -30,14 +181,11 @@ static Path createLogFileDir(const std::string & logDir, const StorePath & drvPa
void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
{
// FIXME: make RemoteResult inherit BuildResult.
startTime = buildResult.startTime;
stopTime = buildResult.stopTime;
timesBuilt = buildResult.timesBuilt;
errorMsg = buildResult.errorMsg;
isNonDeterministic = buildResult.isNonDeterministic;
if (buildResult.startTime && buildResult.stopTime) {
startTime = buildResult.startTime;
stopTime = buildResult.stopTime;
}
switch ((BuildResult::Status) buildResult.status) {
case BuildResult::Built:
@@ -89,56 +237,57 @@ void RemoteResult::updateWithBuildResult(const nix::BuildResult & buildResult)
void State::buildRemote(ref<Store> destStore,
::Machine::ptr machine, Step::ptr step,
const ServeProto::BuildOptions & buildOptions,
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
std::function<void(StepState)> updateStep,
NarMemberDatas & narMembers)
{
assert(BuildResult::TimedOut == 8);
result.logFile = build_remote::createLogFileDir(logDir, step->drvPath);
auto [logFile, logFD] = build_remote::openLogFile(logDir, step->drvPath);
AutoDelete logFileDel(logFile, false);
result.logFile = logFile;
try {
updateStep(ssBuilding);
result.startTime = time(0);
updateStep(ssConnecting);
auto buildStoreUrl = machine->completeStoreReference().render();
// FIXME: rewrite to use Store.
::Machine::Connection conn {
.machine = machine,
.store = [&]{
auto * pSpecified = std::get_if<StoreReference::Specified>(&machine->storeUri.variant);
if (!pSpecified || pSpecified->scheme != "ssh-ng") {
throw Error("Currently, only ssh-ng:// stores are supported!");
}
Strings args = {
localStore->printStorePath(step->drvPath),
"--store", destStore->getUri(),
"--eval-store", localStore->getUri(),
"--build-store", buildStoreUrl,
"--max-silent-time", std::to_string(buildOptions.maxSilentTime),
"--timeout", std::to_string(buildOptions.buildTimeout),
"--max-build-log-size", std::to_string(buildOptions.maxLogSize),
"--max-output-size", std::to_string(maxOutputSize),
"--repeat", std::to_string(buildOptions.nrRepeats),
"--log-file", result.logFile,
// FIXME: step->isDeterministic
auto remoteStore = machine->openStore().dynamic_pointer_cast<RemoteStore>();
auto remoteStoreConfig = std::dynamic_pointer_cast<SSHStoreConfig>(remoteStore);
assert(remoteStore);
if (machine->isLocalhost()) {
auto rp_new = remoteStoreConfig->remoteProgram.get();
rp_new.push_back("--builders");
rp_new.push_back("");
const_cast<nix::Setting<Strings> &>(remoteStoreConfig->remoteProgram).assign(rp_new);
}
remoteStoreConfig->extraSshArgs = {
"-a", "-oBatchMode=yes", "-oConnectTimeout=60", "-oTCPKeepAlive=yes"
};
// TODO logging
//const_cast<nix::Setting<int> &>(remoteStore->logFD).assign(logFD.get());
return nix::ref{remoteStore};
}(),
};
// FIXME: set pid for cancellation
auto [status, childStdout] = [&]() {
MaintainCount<counter> mc(nrStepsBuilding);
return runProgram({
.program = "hydra-build-step",
.args = std::move(args),
});
}();
#if 0
{
auto activeStepState(activeStep->state_.lock());
if (activeStepState->cancelled) throw Error("step cancelled");
activeStepState->pid = conn.store->getConnectionPid();
}
Finally clearPid([&]() {
auto activeStepState(activeStep->state_.lock());
activeStepState->pid = -1;
/* FIXME: there is a slight race here with step
cancellation in State::processQueueChange(), which
@@ -147,32 +296,53 @@ void State::buildRemote(ref<Store> destStore,
possibility that we end up killing another
process. Meh. */
});
#endif
result.stopTime = time(0);
Finally updateStats([&]() {
// TODO
//auto stats = conn.store->getConnectionStats();
//bytesReceived += stats.bytesReceived;
//bytesSent += stats.bytesSent;
});
if (!statusOk(status))
throw ExecError(status, fmt("hydra-build-step %s with output:\n%s", statusToString(status), stdout));
/* The build was executed successfully, so clear the failure
count for this machine. */
{
auto info(machine->state->connectInfo.lock());
info->consecutiveFailures = 0;
}
StringSource from { childStdout };
/* Read the BuildResult from the child. */
WorkerProto::ReadConn rconn {
.from = from,
// Hardcode latest version because we are deploying hydra
// itself atomically
.version = PROTOCOL_VERSION,
};
result.overhead += readNum<uint64_t>(rconn.from);
auto totalNarSize = readNum<uint64_t>(rconn.from);
auto buildResult = WorkerProto::Serialise<BuildResult>::read(*localStore, rconn);
/* Gather the inputs. If the remote side is Nix <= 1.9, we have to
copy the entire closure of drvPath, as well as the required
outputs of the input derivations. On Nix > 1.9, we only need to
copy the immediate sources of the derivation and the required
outputs of the input derivations. */
updateStep(ssSendingInputs);
BasicDerivation resolvedDrv = build_remote::sendInputs(*this, *step, *localStore, *destStore, conn, result.overhead, nrStepsWaiting, nrStepsCopyingTo);
logFileDel.cancel();
/* Truncate the log to get rid of messages about substitutions
etc. on the remote system. */
if (lseek(logFD.get(), SEEK_SET, 0) != 0)
throw SysError("seeking to the start of log file %s", result.logFile);
if (ftruncate(logFD.get(), 0) == -1)
throw SysError("truncating log file %s", result.logFile);
logFD = -1;
/* Do the build. */
printMsg(lvlDebug, "building %s on %s",
localStore->printStorePath(step->drvPath),
machine->storeUri.render());
updateStep(ssBuilding);
BuildResult buildResult = build_remote::performBuild(
conn,
*localStore,
step->drvPath,
resolvedDrv,
nrStepsBuilding
);
result.updateWithBuildResult(buildResult);
@@ -180,13 +350,6 @@ void State::buildRemote(ref<Store> destStore,
result.errorMsg = "";
/* If the NAR size limit was exceeded, then hydra-build-step
will not have copied the output paths. */
if (totalNarSize > maxOutputSize) {
result.stepStatus = bsNarSizeLimitExceeded;
return;
}
/* If the path was substituted or already valid, then we didn't
get a build log. */
if (result.isCached) {
@@ -196,6 +359,50 @@ void State::buildRemote(ref<Store> destStore,
result.logFile = "";
}
StorePathSet outputs;
for (auto & [_, realisation] : buildResult.builtOutputs)
outputs.insert(realisation.outPath);
/* Copy the output paths. */
if (!machine->isLocalhost() || localStore != std::shared_ptr<Store>(destStore)) {
updateStep(ssReceivingOutputs);
MaintainCount<counter> mc(nrStepsCopyingFrom);
auto now1 = std::chrono::steady_clock::now();
/* Copy each path. */
printMsg(lvlDebug, "copying outputs of %s from %s",
localStore->printStorePath(step->drvPath), machine->storeUri.render());
copyClosure(*conn.store, *destStore, outputs);
auto now2 = std::chrono::steady_clock::now();
result.overhead += std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
}
/* Register the outputs of the newly built drv */
if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
auto outputHashes = staticOutputHashes(*localStore, *step->drv);
for (auto & [outputName, realisation] : buildResult.builtOutputs) {
// Register the resolved drv output
destStore->registerDrvOutput(realisation);
// Also register the unresolved one
auto unresolvedRealisation = realisation;
unresolvedRealisation.signatures.clear();
unresolvedRealisation.id.drvHash = outputHashes.at(outputName);
destStore->registerDrvOutput(unresolvedRealisation);
}
}
/* Shut down the connection done by RAII.
Only difference is kill() instead of wait() (i.e. send signal
then wait())
*/
} catch (Error & e) {
/* Disable this machine until a certain period of time has
passed. This period increases on every consecutive

View File

@@ -3,7 +3,6 @@
#include "state.hh"
#include "hydra-build-result.hh"
#include "finally.hh"
#include "terminal.hh"
#include "binary-cache-store.hh"
using namespace nix;
@@ -99,13 +98,6 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
it). */
BuildID buildId;
std::optional<StorePath> buildDrvPath;
// Other fields set below
nix::ServeProto::BuildOptions buildOptions {
.maxLogSize = maxLogSize,
.nrRepeats = step->isDeterministic ? 1u : 0u,
.enforceDeterminism = step->isDeterministic,
.keepFailed = false,
};
auto conn(dbPool.get());
@@ -140,18 +132,19 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
{
auto i = jobsetRepeats.find(std::make_pair(build2->projectName, build2->jobsetName));
if (i != jobsetRepeats.end())
buildOptions.nrRepeats = std::max(buildOptions.nrRepeats, i->second);
warn("jobset repeats is deprecated; nix stopped supporting this correctly a long time ago.");
}
}
if (!build) build = *dependents.begin();
buildId = build->id;
buildDrvPath = build->drvPath;
buildOptions.maxSilentTime = build->maxSilentTime;
buildOptions.buildTimeout = build->buildTimeout;
settings.maxLogSize = maxLogSize;
settings.maxSilentTime = build->maxSilentTime;
settings.buildTimeout = build->buildTimeout;
printInfo("performing step %s %d times on %s (needed by build %d and %d others)",
localStore->printStorePath(step->drvPath), buildOptions.nrRepeats + 1, machine->storeUri.render(), buildId, (dependents.size() - 1));
localStore->printStorePath(step->drvPath), 1, machine->storeUri.render(), buildId, (dependents.size() - 1));
}
if (!buildOneDone)
@@ -212,7 +205,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
try {
/* FIXME: referring builds may have conflicting timeouts. */
buildRemote(destStore, machine, step, buildOptions, result, activeStep, updateStep, narMembers);
buildRemote(destStore, machine, step, result, activeStep, updateStep, narMembers);
} catch (Error & e) {
if (activeStep->state_.lock()->cancelled) {
printInfo("marking step %d of build %d as cancelled", stepNr, buildId);
@@ -220,7 +213,7 @@ State::StepResult State::doBuildStep(nix::ref<Store> destStore,
result.canRetry = false;
} else {
result.stepStatus = bsAborted;
result.errorMsg = filterANSIEscapes(e.msg(), true);
result.errorMsg = e.msg();
result.canRetry = true;
}
}

View File

@@ -182,7 +182,7 @@ void State::monitorMachinesFile()
getEnv("NIX_REMOTE_SYSTEMS").value_or(pathExists(defaultMachinesFile) ? defaultMachinesFile : ""), ":");
if (machinesFiles.empty()) {
parseMachines("localhost " +
parseMachines("ssh-ng://localhost " +
(settings.thisSystem == "x86_64-linux" ? "x86_64-linux,i686-linux" : settings.thisSystem.get())
+ " - " + std::to_string(settings.maxBuildJobs) + " 1 "
+ concatStringsSep(",", StoreConfig::getDefaultSystemFeatures()));
@@ -550,11 +550,12 @@ void State::dumpStatus(Connection & conn)
{"nrQueuedBuilds", builds.lock()->size()},
{"nrActiveSteps", activeSteps_.lock()->size()},
{"nrStepsBuilding", nrStepsBuilding.load()},
#if 0
{"nrStepsCopyingTo", nrStepsCopyingTo.load()},
{"nrStepsCopyingFrom", nrStepsCopyingFrom.load()},
#endif
{"nrStepsWaiting", nrStepsWaiting.load()},
{"nrUnsupportedSteps", nrUnsupportedSteps.load()},
{"bytesSent", bytesSent.load()},
{"bytesReceived", bytesReceived.load()},
{"nrBuildsRead", nrBuildsRead.load()},
{"buildReadTimeMs", buildReadTimeMs.load()},
{"buildReadTimeAvgMs", nrBuildsRead == 0 ? 0.0 : (float) buildReadTimeMs / nrBuildsRead},

View File

@@ -20,7 +20,7 @@
#include "store-api.hh"
#include "sync.hh"
#include "nar-extractor.hh"
#include "serve-protocol.hh"
#include "ssh-store.hh"
#include "machines.hh"
@@ -288,10 +288,18 @@ struct Machine : nix::Machine
}
bool isLocalhost() const;
// A connection to a machine
struct Connection {
// Backpointer to the machine
ptr machine;
// Opened store
nix::ref<nix::RemoteStore> store;
};
};
class HydraConfig;
struct HydraConfig;
class State
@@ -352,10 +360,9 @@ private:
counter nrStepsStarted{0};
counter nrStepsDone{0};
counter nrStepsBuilding{0};
#if 0
counter nrStepsCopyingTo{0};
counter nrStepsCopyingFrom{0};
#endif
counter nrStepsWaiting{0};
counter nrUnsupportedSteps{0};
counter nrRetries{0};
counter maxNrRetries{0};
@@ -364,6 +371,8 @@ private:
counter nrQueueWakeups{0};
counter nrDispatcherWakeups{0};
counter dispatchTimeMs{0};
counter bytesSent{0};
counter bytesReceived{0};
counter nrActiveDbUpdates{0};
/* Specific build to do for --build-one (testing only). */
@@ -533,7 +542,6 @@ private:
void buildRemote(nix::ref<nix::Store> destStore,
Machine::ptr machine, Step::ptr step,
const nix::ServeProto::BuildOptions & buildOptions,
RemoteResult & result, std::shared_ptr<ActiveStep> activeStep,
std::function<void(StepState)> updateStep,
NarMemberDatas & narMembers);

View File

@@ -1,6 +1,5 @@
# Native code
subdir('libhydra')
subdir('hydra-build-step')
subdir('hydra-evaluator')
subdir('hydra-queue-runner')

View File

@@ -28,7 +28,6 @@ testenv.prepend('PERL5LIB',
)
testenv.prepend('PATH',
fs.parent(find_program('nix').full_path()),
fs.parent(hydra_build_step.full_path()),
fs.parent(hydra_evaluator.full_path()),
fs.parent(hydra_queue_runner.full_path()),
meson.project_source_root() / 'src/script',