Compare commits
17 Commits
de600fe7c7 ... microvm-di

| Author | SHA1 | Date |
|---|---|---|
| | e8c188debf | |
| | 1f559d93ba | |
| | a03b7506c5 | |
| | 3b2a7cedc5 | |
| | a48e271853 | |
| | d202a3d0cb | |
| | ef33833910 | |
| | d73031e7f1 | |
| | be0bb0b08b | |
| | 026494c877 | |
| | 3021716640 | |
| | 70ec63f213 | |
| | 91d86c49a1 | |
| | 96dee29595 | |
| | d5e94b50cb | |
| | 286e03c853 | |
| | 766b738a6a | |

@@ -21,18 +21,3 @@ Testing disko partitioning is working quite well. Just run the following and che
```bash
nix run -L .\#nixosConfigurations.fanny.config.system.build.vmWithDisko
```

The only problem is that encryption is not working, so it needs to be commented out. For testing host fanny, the following parts in `./machines/modules/disko/fanny.nix` need to be commented out (for both pools!):
```nix
datasets = {
  encrypted = {
    options = {
      encryption = "aes-256-gcm"; #THIS ONE
      keyformat = "passphrase"; #THIS ONE
      keylocation = "file:///tmp/root.key"; #THIS ONE
    };
    # use this to read the key during boot
    postCreateHook = '' #THIS ONE
      zfs set keylocation="prompt" "zroot/$name"; #THIS ONE
    ''; #THIS ONE
```
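
Instead of commenting these attributes out by hand for every VM test, they could also be guarded behind a flag. A minimal sketch, assuming a hypothetical `enableEncryption` boolean that is not part of this repo:

```nix
# Sketch only: `enableEncryption` is a hypothetical flag, not defined in this repo.
options = lib.optionalAttrs enableEncryption {
  encryption = "aes-256-gcm";
  keyformat = "passphrase";
  keylocation = "file:///tmp/root.key";
};
postCreateHook = lib.optionalString enableEncryption ''
  zfs set keylocation="prompt" "zroot/$name";
'';
```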

@@ -18,6 +18,7 @@ in
    inputs.self.nixosModules.malobeo.microvm
    inputs.self.nixosModules.malobeo.metrics
    inputs.self.nixosModules.malobeo.users
    inputs.self.nixosModules.malobeo.backup
  ];

  virtualisation.vmVariantWithDisko = {
@@ -42,6 +43,11 @@ in
    cacheurl = "https://cache.dynamicdiscord.de";
  };

  malobeo.backup = {
    enable = true;
    snapshots = [ "storage/encrypted" "zroot/encrypted/var" ];
  };

  nix = {
    settings.experimental-features = [ "nix-command" "flakes" ];
    #always update microvms
@@ -53,6 +59,7 @@ in
  malobeo.users = {
    malobeo = true;
    admin = true;
    backup = true;
  };

  malobeo.disks = {

@@ -195,8 +195,7 @@ rec {

  vmNestedMicroVMOverwrites = host: sopsDummy: {

    services.malobeo.microvm.deployHosts = pkgs.lib.mkForce [];
    microvm.vms =
    microvm.vms = pkgs.lib.mkForce (
      let
        # Map the values to each hostname to then generate an Attrset using listToAttrs
        mapperFunc = name: { inherit name; value = {
@@ -216,7 +215,7 @@ rec {
        };
      }; };
    in
      builtins.listToAttrs (map mapperFunc self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts);
      builtins.listToAttrs (map mapperFunc self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts));
  };

  buildVM = host: networking: sopsDummy: disableDisko: varPath: writableStore: fwdPort: (self.nixosConfigurations.${host}.extendModules {
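
For context, `builtins.listToAttrs` turns a list of `{ name, value }` pairs into an attribute set, which is how the per-host VM definitions above are generated from `deployHosts`. A minimal, self-contained illustration with made-up hostnames:

```nix
# Illustration only: the hostnames are made up; the real list comes from
# services.malobeo.microvm.deployHosts.
let
  mapperFunc = name: { inherit name; value = { autostart = true; }; };
in
  builtins.listToAttrs (map mapperFunc [ "vm-a" "vm-b" ])
  # => { vm-a = { autostart = true; }; vm-b = { autostart = true; }; }
```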

machines/modules/malobeo/backup.nix (new file, 102 lines)
@@ -0,0 +1,102 @@
{ config, lib, pkgs, ... }:
with lib;
let
  cfg = config.malobeo.backup;
  hostToCommand = (hostname: datasetNames:
    (map (dataset: {
      name = "${hostname}_${dataset.sourceDataset}";
      value = {
        inherit hostname;
        inherit (dataset) sourceDataset targetDataset;
      };
    } ) datasetNames));
  peers = import ./peers.nix;

  enableSnapshots = cfg.snapshots != null;
  enableBackups = cfg.hosts != null;
in
{
  options.malobeo.backup = {
    enable = mkOption {
      type = types.bool;
      default = false;
      description = "Enable sanoid/syncoid based backup functionality";
    };

    snapshots = mkOption {
      type = types.nullOr (types.listOf types.str);
      default = null;
      description = "Automatic snapshots will be created for the given datasets";
    };

    hosts = mkOption {
      default = null;
      type = types.nullOr (types.attrsOf (types.listOf (types.submodule {
        options = {
          sourceDataset = mkOption {
            type = types.str;
            description = "The source that needs to be backed up";
          };
          targetDataset = mkOption {
            type = types.str;
            description = "The target dataset where the backup should be stored";
          };
        };
      })));
      description = ''
        Hostname with list of datasets to backup. This option should be defined on hosts that will store backups.

        It is necessary to add the machines that get backed up to known hosts.
        This can be done for example systemwide using
        programs.ssh.knownHosts."10.100.0.101" = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHqp2/YiiIhai7wyScGZJ20gtrzY+lp4N/8unyRs4qhc";
        Or set it for the syncoid user directly.
      '';
    };

    sshKey = mkOption {
      default = null;
      type = types.nullOr types.str;
      description = "Set path to ssh key used for pull backups. Otherwise default key is used";
    };
  };

  config = mkIf (cfg.enable) {
    services.sanoid = mkIf (enableSnapshots) {
      enable = true;

      templates."default" = {
        hourly = 24;
        daily = 30; #keep 30 daily snapshots
        monthly = 6; #keep 6 monthly backups
        yearly = 0;

        autosnap = true; #take snapshots automatically
        autoprune = true; #delete old snapshots
      };

      datasets = builtins.listToAttrs (map (name: { inherit name; value = {
        useTemplate = [ "default" ];
        recursive = true;
      }; }) cfg.snapshots);
    };

    services.syncoid = mkIf (enableBackups) {
      enable = true;
      sshKey = cfg.sshKey;

      commonArgs = [
        "--no-sync-snap"
      ];

      interval = "*-*-* 04:15:00";

      commands = builtins.mapAttrs (name: value: {
        source = "backup@${peers.${value.hostname}.address}:${value.sourceDataset}";
        target = "${value.targetDataset}";
        sendOptions = "w";
        recvOptions = "\"\"";
        recursive = true;
      })(builtins.listToAttrs (builtins.concatLists (builtins.attrValues (builtins.mapAttrs hostToCommand cfg.hosts))));
    };
  };
}
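
To make both sides of this module concrete: the `snapshots` side is already shown in the host config above, while the `hosts` (pull backup) side is set on a receiving machine. The following is a hedged sketch: `backup0` and the peer address come from peers.nix, but the target dataset name is illustrative and not part of this change set.

```nix
# Hypothetical config on a backup-receiving host such as backup0.
malobeo.backup = {
  enable = true;
  hosts = {
    # Pull zroot/encrypted/var from fanny (10.100.0.101 in peers.nix);
    # "backup/fanny/var" is an illustrative target dataset.
    fanny = [
      { sourceDataset = "zroot/encrypted/var"; targetDataset = "backup/fanny/var"; }
    ];
  };
};

# Known-host entry for the source machine, as the option description suggests
# (system-wide variant; key copied from the description above).
programs.ssh.knownHosts."10.100.0.101".publicKey =
  "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHqp2/YiiIhai7wyScGZJ20gtrzY+lp4N/8unyRs4qhc";
```

With such a definition, the `commands` mapping above generates a syncoid entry named `fanny_zroot/encrypted/var` that pulls from `backup@10.100.0.101:zroot/encrypted/var`.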

@@ -102,6 +102,22 @@ in
        /run/current-system/sw/bin/microvm -Ru ${name}
      '';
    };

    "microvm-init-dirs@${name}" = {
      description = "Initialize microvm directories";
      after = [ "zfs-mount.service" ];
      wantedBy = [ "microvm@${name}.service" ];
      unitConfig.ConditionPathExists = "!/var/lib/microvms/${name}/.is_initialized";
      serviceConfig = {
        Type = "oneshot";
      };
      script = ''
        mkdir -p /var/lib/microvms/${name}/var
        mkdir -p /var/lib/microvms/${name}/etc
        mkdir -p /var/lib/microvms/data/${name}
        touch /var/lib/microvms/${name}/.is_initialized
      '';
    };
  }) {} (cfg.deployHosts);

  systemd.timers = builtins.foldl' (timers: name: timers // {

@@ -2,7 +2,7 @@
  "vpn" = {
    role = "server";
    publicIp = "5.9.153.217";
    address = [ "10.100.0.1/24" ];
    address = "10.100.0.1";
    allowedIPs = [ "10.100.0.0/24" ];
    listenPort = 51821;
    publicKey = "hF9H10Y8Ar7zvZXFoNM8LSoaYFgPCXv30c54SSEucX4=";

@@ -11,36 +11,43 @@

  "celine" = {
    role = "client";
    address = [ "10.100.0.2/24" ];
    address = "10.100.0.2";
    allowedIPs = [ "10.100.0.2/32" ];
    publicKey = "Jgx82tSOmZJS4sm1o8Eci9ahaQdQir2PLq9dBqsWZw4=";
  };

  "desktop" = {
    role = "client";
    address = [ "10.100.0.3/24" ];
    address = "10.100.0.3";
    allowedIPs = [ "10.100.0.3/32" ];
    publicKey = "FtY2lcdWcw+nvtydOOUDyaeh/xkaqHA8y9GXzqU0Am0=";
  };

  "atlan-pc" = {
    role = "client";
    address = [ "10.100.0.5/24" ];
    address = "10.100.0.5";
    allowedIPs = [ "10.100.0.5/32" ];
    publicKey = "TrJ4UAF//zXdaLwZudI78L+rTC36zEDodTDOWNS4Y1Y=";
  };

  "hetzner" = {
    role = "client";
    address = [ "10.100.0.6/24" ];
    address = "10.100.0.6";
    allowedIPs = [ "10.100.0.6/32" ];
    publicKey = "csRzgwtnzmSLeLkSwTwEOrdKq55UOxZacR5D3GopCTQ=";
  };

  "fanny" = {
    role = "client";
    address = [ "10.100.0.101/24" ];
    address = "10.100.0.101";
    allowedIPs = [ "10.100.0.101/32" ];
    publicKey = "3U59F6T1s/1LaZBIa6wB0qsVuO6pRR9jfYZJIH2piAU=";
  };

  "backup0" = {
    role = "client";
    address = "10.100.0.20";
    allowedIPs = [ "10.100.0.20/32" ];
    publicKey = "Pp55Jg//jREzHdbbIqTXc9N7rnLZIFw904qh6NLrACE=";
  };
}

@@ -68,7 +68,11 @@ in
      users = [ "backup" ];
      commands = [
        {
          command = "${pkgs.zfs-user}/bin/zfs";
          command = "/run/current-system/sw/bin/zfs";
          options = [ "NOPASSWD" ];
        }
        {
          command = "/run/current-system/sw/bin/zpool";
          options = [ "NOPASSWD" ];
        }
      ];
@@ -94,4 +98,4 @@ in
        ];
      }
    ];
  }
}

@@ -70,7 +70,7 @@ in
  interfaces = {
    malovpn = {
      mtu = 1340; #seems to be necessary to proxypass nginx traffic through vpn
      address = myPeer.address;
      address = [ "${myPeer.address}/24" ];
      autostart = cfg.autostart;
      listenPort = mkIf (myPeer.role == "server") myPeer.listenPort;
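
In other words, the /24 prefix now lives in this module instead of in peers.nix. A quick sketch of what the new expression evaluates to, assuming the module imports peers.nix the same way backup.nix does and using the fanny peer as an example:

```nix
let
  myPeer = (import ./peers.nix)."fanny";   # address = "10.100.0.101"
in
  [ "${myPeer.address}/24" ]               # => [ "10.100.0.101/24" ]
```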

@@ -116,6 +116,7 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
  metrics.imports = [ ./machines/modules/malobeo/metrics.nix ];
  disko.imports = [ ./machines/modules/disko ];
  users.imports = [ ./machines/modules/malobeo/users.nix ];
  backup.imports = [ ./machines/modules/malobeo/backup.nix ];
};

hydraJobs = nixpkgs.lib.mapAttrs (_: nixpkgs.lib.hydraJob) (