7 Commits

SHA1        Message                               Date                         Check flake syntax / flake-check (push)
be0bb0b08b  [backup] fix description              2025-03-16 12:53:43 +01:00   successful in 4m13s
026494c877  [backup] fix typo                     2025-03-16 11:25:37 +01:00   successful in 4m12s
3021716640  [backup] update module descriptions   2025-03-16 11:15:52 +01:00   failing after 2m16s
70ec63f213  [users] fix typo                      2025-03-16 10:24:17 +01:00   successful in 4m13s
91d86c49a1  [fanny] enable automatic snapshots    2025-03-16 10:18:57 +01:00   failing after 3m0s
96dee29595  [fanny] enable backup user            2025-03-16 10:18:39 +01:00
d5e94b50cb  [backup] fix errors                   2025-03-16 10:09:54 +01:00   successful in 5m44s
3 changed files with 48 additions and 23 deletions

View File

@@ -18,6 +18,7 @@ in
     inputs.self.nixosModules.malobeo.microvm
     inputs.self.nixosModules.malobeo.metrics
     inputs.self.nixosModules.malobeo.users
+    inputs.self.nixosModules.malobeo.backup
   ];
   virtualisation.vmVariantWithDisko = {
@@ -42,6 +43,11 @@ in
     cacheurl = "https://cache.dynamicdiscord.de";
   };
+  malobeo.backup = {
+    enable = true;
+    snapshots = [ "storage/encrypted" "zroot/encrypted/var" ];
+  };
   nix = {
     settings.experimental-features = [ "nix-command" "flakes" ];
     #always update microvms
@@ -53,6 +59,7 @@ in
   malobeo.users = {
     malobeo = true;
     admin = true;
+    backup = true;
   };
   malobeo.disks = {
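
The snapshots list above is consumed by the sanoid half of the backup module (second file below). A rough sketch of what it plausibly expands to; the template contents sit outside the visible hunks, so the retention values here are assumptions:

  services.sanoid = {
    enable = true;
    # Retention settings are not visible in the diff; placeholder values:
    templates."default" = { hourly = 24; daily = 7; autosnap = true; autoprune = true; };
    datasets."storage/encrypted".useTemplate = [ "default" ];
    datasets."zroot/encrypted/var".useTemplate = [ "default" ];
  };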

View File

@@ -2,42 +2,66 @@
 with lib;
 let
   cfg = config.malobeo.backup;
-  newfunc = (hostname: datasetNames: (map (dataset: { name = "${hostname}_${dataset.sourceDataset}"; value = { inherit hostname; inherit dataset; }; } ) datasetNames));
+  hostToCommand = (hostname: datasetNames:
+    (map (dataset: {
+      name = "${hostname}_${dataset.sourceDataset}";
+      value = {
+        inherit hostname;
+        inherit (dataset) sourceDataset targetDataset;
+      };
+    } ) datasetNames));
   peers = import ./peers.nix;
+  enableSnapshots = cfg.snapshots != null;
+  enableBackups = cfg.hosts != null;
 in
 {
   options.malobeo.backup = {
     enable = mkOption {
       type = types.bool;
       default = false;
-      description = "Enable sharing metrics";
+      description = "Enable sanoid/syncoid based backup functionality";
     };
     snapshots = mkOption {
-      type = types.listOf types.str;
-      default = [];
+      type = types.nullOr (types.listOf types.str);
+      default = null;
       description = "Automatic snapshots will be created for the given datasets";
     };
-    #TODO: instead listof str we need dataset here to declare the dataset name on the source host
-    # and also the dataset name on target host (which stores the backups)
     hosts = mkOption {
-      type = types.attrsOf (types.listOf (types.submodule {
+      default = null;
+      type = types.nullOr (types.attrsOf (types.listOf (types.submodule {
         options = {
           sourceDataset = mkOption {
             type = types.str;
             description = "The source that needs to be backed up";
           };
           targetDataset = mkOption {
             type = types.str;
             description = "The target dataset where the backup should be stored";
           };
         };
-      }));
-      description = "Hostname with list of datasets to backup.";
+      })));
+      description = ''
+        Hostname with a list of datasets to back up. This option should be defined on hosts that will store the backups.
+        It is necessary to add the machines that get backed up to the known hosts.
+        This can be done, for example, systemwide using
+        programs.ssh.knownHosts."10.100.0.101".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHqp2/YiiIhai7wyScGZJ20gtrzY+lp4N/8unyRs4qhc";
+        or set it for the syncoid user directly.
+      '';
     };
+    sshKey = mkOption {
+      default = null;
+      type = types.nullOr types.str;
+      description = "Set the path to the ssh key used for pull backups. Otherwise the default key is used.";
+    };
   };
   config = mkIf (cfg.enable) {
-    services.sanoid = {
+    services.sanoid = mkIf (enableSnapshots) {
       enable = true;
       templates."default" = {
@@ -56,10 +80,9 @@ in
       }; }) cfg.snapshots);
     };
-    services.syncoid = with config; {
+    services.syncoid = mkIf (enableBackups) {
       enable = true;
-      sshKey = sops.secrets.backup_key.path;
+      sshKey = cfg.sshKey;
       commonArgs = [
         "--no-sync-snap"
@@ -68,17 +91,12 @@ in
       interval = "*-*-* 04:15:00";
       commands = builtins.mapAttrs (name: value: {
-        source = "backup@${peers.${value.hostname}.address}:${value.dataset.sourceDataset}";
-        target = "${value.dataset.targetDataset}";
+        source = "backup@${peers.${value.hostname}.address}:${value.sourceDataset}";
+        target = "${value.targetDataset}";
         sendOptions = "w";
         recvOptions = "\"\"";
         recursive = true;
-      })(builtins.listToAttrs (builtins.concatLists (builtins.attrValues (builtins.mapAttrs newfunc cfg.hosts))));
+      })(builtins.listToAttrs (builtins.concatLists (builtins.attrValues (builtins.mapAttrs hostToCommand cfg.hosts))));
     };
-    sops.secrets.backup_key = {
-      owner = config.services.syncoid.user;
-      key = "backup_key";
-    };
   };
 }
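
To make the new plumbing concrete, here is a sketch of how hostToCommand flattens the hosts option into services.syncoid.commands. The hostname, datasets, and address below are invented for illustration (the address reuses the example from the option description); only the shape follows the module code above.

  # Hypothetical value of the option on the backup-storing host:
  malobeo.backup.hosts = {
    fanny = [
      { sourceDataset = "storage/encrypted"; targetDataset = "backups/fanny/storage"; }
    ];
  };

  # hostToCommand turns each dataset into a name/value pair:
  #   { name = "fanny_storage/encrypted";
  #     value = { hostname = "fanny"; sourceDataset = "storage/encrypted"; targetDataset = "backups/fanny/storage"; }; }
  # After concatLists and listToAttrs, the module generates roughly:
  services.syncoid.commands."fanny_storage/encrypted" = {
    source = "backup@10.100.0.101:storage/encrypted";   # address looked up in peers.nix
    target = "backups/fanny/storage";
    sendOptions = "w";       # raw send: encrypted datasets stay encrypted in transit
    recvOptions = "\"\"";
    recursive = true;
  };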

View File

@@ -68,7 +68,7 @@ in
         users = [ "backup" ];
         commands = [
           {
-            command = "${pkgs.zfs-user}/bin/zfs";
+            command = "${pkgs.zfs}/bin/zfs";
             options = [ "NOPASSWD" ];
           }
         ];
@@ -94,4 +94,4 @@ in
     ];
   }
 ];
-}
+}
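
Putting the pieces together, a minimal sketch of a host that stores the backups, assuming the module is imported the same way as in the first file. The host name, dataset names, and key path are invented; the address and public key reuse the example from the option description. The NOPASSWD zfs rule in the last file is the source-host counterpart: syncoid logs in there as the backup user and needs to run zfs send via sudo.

  { inputs, ... }:
  {
    imports = [ inputs.self.nixosModules.malobeo.backup ];

    malobeo.backup = {
      enable = true;
      # Pull these datasets from fanny into local datasets:
      hosts.fanny = [
        { sourceDataset = "storage/encrypted"; targetDataset = "backups/fanny/storage"; }
      ];
      # Optional dedicated key; replaces the removed sops-managed backup_key:
      sshKey = "/var/lib/syncoid/.ssh/id_ed25519";
    };

    # Trust the source host, as suggested in the option description:
    programs.ssh.knownHosts."10.100.0.101".publicKey =
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHqp2/YiiIhai7wyScGZJ20gtrzY+lp4N/8unyRs4qhc";
  }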