backup module #92
@@ -18,6 +18,7 @@ in
     inputs.self.nixosModules.malobeo.microvm
     inputs.self.nixosModules.malobeo.metrics
     inputs.self.nixosModules.malobeo.users
+    inputs.self.nixosModules.malobeo.backup
   ];

   virtualisation.vmVariantWithDisko = {
@@ -42,6 +43,11 @@ in
     cacheurl = "https://cache.dynamicdiscord.de";
   };

+  malobeo.backup = {
+    enable = true;
+    snapshots = [ "storage/encrypted" "zroot/encrypted/var" ];
+  };
+
   nix = {
     settings.experimental-features = [ "nix-command" "flakes" ];
     #always update microvms
@@ -53,6 +59,7 @@ in
   malobeo.users = {
     malobeo = true;
     admin = true;
+    backup = true;
  };

  malobeo.disks = {
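Taken together, the malobeo.backup settings above expand (via the new module below) into one sanoid entry per dataset listed in snapshots, each using the module's "default" template. A sketch of the configuration this machine ends up with, derived from the module code:

  services.sanoid = {
    enable = true;
    datasets."storage/encrypted" = {
      useTemplate = [ "default" ];
      recursive = true; # also snapshot child datasets
    };
    datasets."zroot/encrypted/var" = {
      useTemplate = [ "default" ];
      recursive = true;
    };
  };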
machines/modules/malobeo/backup.nix (new file, 102 lines)
@@ -0,0 +1,102 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.malobeo.backup;
+  hostToCommand = (hostname: datasetNames:
+    (map (dataset: {
+      name = "${hostname}_${dataset.sourceDataset}";
+      value = {
+        inherit hostname;
+        inherit (dataset) sourceDataset targetDataset;
+      };
+    }) datasetNames));
+  peers = import ./peers.nix;
+
+  enableSnapshots = cfg.snapshots != null;
+  enableBackups = cfg.hosts != null;
+in
+{
+  options.malobeo.backup = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = "Enable sanoid/syncoid-based backup functionality";
+    };
+
+    snapshots = mkOption {
+      type = types.nullOr (types.listOf types.str);
+      default = null;
+      description = "Automatic snapshots will be created for the given datasets";
+    };
+
+    hosts = mkOption {
+      default = null;
+      type = types.nullOr (types.attrsOf (types.listOf (types.submodule {
+        options = {
+          sourceDataset = mkOption {
+            type = types.str;
+            description = "The source dataset that needs to be backed up";
+          };
+          targetDataset = mkOption {
+            type = types.str;
+            description = "The target dataset where the backup should be stored";
+          };
+        };
+      })));
+      description = ''
+        Hostname with a list of datasets to back up. This option should be defined on hosts that will store backups.
+
+        It is necessary to add the machines that get backed up to known hosts.
+        This can be done system-wide, for example:
+        programs.ssh.knownHosts."10.100.0.101" = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHqp2/YiiIhai7wyScGZJ20gtrzY+lp4N/8unyRs4qhc";
+        Or set it for the syncoid user directly.
+      '';
+    };
+
+    sshKey = mkOption {
+      default = null;
+      type = types.nullOr types.str;
+      description = "Path to the ssh key used for pull backups; otherwise the default key is used";
+    };
+  };
+
+  config = mkIf (cfg.enable) {
+    services.sanoid = mkIf (enableSnapshots) {
+      enable = true;
+
+      templates."default" = {
+        hourly = 0;
+        daily = 30; #keep 30 daily snapshots
+        monthly = 6; #keep 6 monthly snapshots
+        yearly = 0;
+
+        autosnap = true; #take snapshots automatically
+        autoprune = true; #delete old snapshots
+      };
+
+      datasets = builtins.listToAttrs (map (name: { inherit name; value = {
+        useTemplate = [ "default" ];
+        recursive = true;
+      }; }) cfg.snapshots);
+    };
+
+    services.syncoid = mkIf (enableBackups) {
+      enable = true;
+      sshKey = cfg.sshKey;
+
+      commonArgs = [
+        "--no-sync-snap"
+      ];
+
+      interval = "*-*-* 04:15:00";
+
+      commands = builtins.mapAttrs (name: value: {
+        source = "backup@${peers.${value.hostname}.address}:${value.sourceDataset}";
+        target = "${value.targetDataset}";
+        sendOptions = "w";
+        recvOptions = "\"\"";
+        recursive = true;
+      }) (builtins.listToAttrs (builtins.concatLists (builtins.attrValues (builtins.mapAttrs hostToCommand cfg.hosts))));
+    };
+  };
+}
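The hosts option covers the pull side and is meant for machines that store backups; no host sets it in this change. A minimal sketch of what such a configuration could look like — the pulled host "fanny" matches peers.nix, but the dataset names and target layout are illustrative assumptions:

  malobeo.backup = {
    enable = true;
    hosts."fanny" = [
      {
        sourceDataset = "storage/encrypted";    # dataset on fanny to pull
        targetDataset = "backup/fanny/storage"; # hypothetical dataset on the backup host
      }
    ];
  };

Through hostToCommand this evaluates to a single syncoid command named "fanny_storage/encrypted" that pulls backup@10.100.0.101:storage/encrypted (fanny's address from peers.nix) into backup/fanny/storage. sendOptions = "w" requests raw sends, so encrypted datasets are replicated without being decrypted on the backup host. The peers.nix hunks below switch address from a CIDR list to a plain string, which is what lets the interpolation in source above produce a bare IP.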
@@ -2,7 +2,7 @@
   "vpn" = {
     role = "server";
     publicIp = "5.9.153.217";
-    address = [ "10.100.0.1/24" ];
+    address = "10.100.0.1";
     allowedIPs = [ "10.100.0.0/24" ];
     listenPort = 51821;
     publicKey = "hF9H10Y8Ar7zvZXFoNM8LSoaYFgPCXv30c54SSEucX4=";
@@ -11,35 +11,35 @@

   "celine" = {
     role = "client";
-    address = [ "10.100.0.2/24" ];
+    address = "10.100.0.2";
     allowedIPs = [ "10.100.0.2/32" ];
     publicKey = "Jgx82tSOmZJS4sm1o8Eci9ahaQdQir2PLq9dBqsWZw4=";
   };

   "desktop" = {
     role = "client";
-    address = [ "10.100.0.3/24" ];
+    address = "10.100.0.3";
     allowedIPs = [ "10.100.0.3/32" ];
     publicKey = "FtY2lcdWcw+nvtydOOUDyaeh/xkaqHA8y9GXzqU0Am0=";
   };

   "atlan-pc" = {
     role = "client";
-    address = [ "10.100.0.5/24" ];
+    address = "10.100.0.5";
     allowedIPs = [ "10.100.0.5/32" ];
     publicKey = "TrJ4UAF//zXdaLwZudI78L+rTC36zEDodTDOWNS4Y1Y=";
   };

   "hetzner" = {
     role = "client";
-    address = [ "10.100.0.6/24" ];
+    address = "10.100.0.6";
     allowedIPs = [ "10.100.0.6/32" ];
     publicKey = "csRzgwtnzmSLeLkSwTwEOrdKq55UOxZacR5D3GopCTQ=";
   };

   "fanny" = {
     role = "client";
-    address = [ "10.100.0.101/24" ];
+    address = "10.100.0.101";
     allowedIPs = [ "10.100.0.101/32" ];
     publicKey = "3U59F6T1s/1LaZBIa6wB0qsVuO6pRR9jfYZJIH2piAU=";
   };
@@ -68,7 +68,7 @@ in
       users = [ "backup" ];
       commands = [
         {
-          command = "${pkgs.zfs-user}/bin/zfs";
+          command = "${pkgs.zfs}/bin/zfs";
          options = [ "NOPASSWD" ];
        }
      ];
@@ -94,4 +94,4 @@ in
       ];
     }
   ];
 }
@@ -70,7 +70,7 @@ in
     interfaces = {
       malovpn = {
         mtu = 1340; #seems to be necessary to proxypass nginx traffic through vpn
-        address = myPeer.address;
+        address = [ "${myPeer.address}/24" ];
         autostart = cfg.autostart;
         listenPort = mkIf (myPeer.role == "server") myPeer.listenPort;

@@ -116,6 +116,7 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
       metrics.imports = [ ./machines/modules/malobeo/metrics.nix ];
       disko.imports = [ ./machines/modules/disko ];
       users.imports = [ ./machines/modules/malobeo/users.nix ];
+      backup.imports = [ ./machines/modules/malobeo/backup.nix ];
     };

     hydraJobs = nixpkgs.lib.mapAttrs (_: nixpkgs.lib.hydraJob) (