1 Commits

Commit e4f6cf2595: [user module] add backup usr (2025-03-11 18:19:26 +01:00)
All checks were successful: Check flake syntax / flake-check (push), successful in 5m49s
16 changed files with 102 additions and 5062 deletions

View File

@@ -1,19 +1,47 @@
# Create host with nixos-anywhere
We use a nixos-anywhere wrapper script to deploy new hosts.
The wrapper script takes care of copying persistent host keys before calling nixos-anywhere.
To accomplish that, boot the host from a NixOS image and set up a root password.
# Create host with disko-install
How to use disko-install is described here: https://github.com/nix-community/disko/blob/master/docs/disko-install.md
---
Here are the exact steps to get bakunin running:
First create machines/hostname/configuration.nix.
Add the host's nixosConfiguration in machines/configurations.nix.
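For orientation, such an entry might look roughly like this (a hypothetical sketch; the actual helpers and module layout in this repo may differ):
```nix
# hypothetical sketch, not necessarily the repo's actual wiring
bakunin = nixpkgs.lib.nixosSystem {
  system = "x86_64-linux";
  specialArgs = { inherit inputs; };
  modules = [ ./bakunin/configuration.nix ];
};
```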
Boot the NixOS installer on the machine.
``` bash
sudo su
passwd
```
# establish network connection
wpa_passphrase "network" "password" > wpa.conf
wpa_supplicant -B -i wlp3s0 -c wpa.conf
ping 8.8.8.8
# if that works continue
After that, get the host's IP using `ip a` and start the deployment from your own machine:
# generate a base hardware config
nixos-generate-config --root /tmp/config --no-filesystems
``` bash
# from infrastructure repository root dir:
nix develop .#
remote-install hostname 10.0.42.23
# get the infra repo
nix-shell -p git
git clone https://git.dynamicdiscord.de/kalipso/infrastructure
cd infrastructure
# add the newly generated hardware config (and import it in the host's configuration.nix)
cp /tmp/config/etc/nixos/hardware-configuration.nix machines/bakunin/
# check which hard drive we want to install the system on
lsblk # choose the hard drive, in this case /dev/sda
# run disko-install on that hard drive
sudo nix --extra-experimental-features flakes --extra-experimental-features nix-command run 'github:nix-community/disko/latest#disko-install' -- --flake .#bakunin --disk main /dev/sda
# this failed with out of memory
# running again showed: no space left on device
# it seems the usb stick I used for flashing is way too small
# it is only
# with a bigger one (more than 8 gig I guess) it should work
# instead of the disko-install tool I try the old method - first partitioning using disko and then installing the system
# for that I needed to adjust ./machines/modules/disko/btrfs-laptop.nix and set the disk to "/dev/sda"
sudo nix --extra-experimental-features "flakes nix-command" run 'github:nix-community/disko/latest' -- --mode format --flake .#bakunin
# failed with no space left on device.
# the problem is that lots of data is written to the local /nix/store, which is mounted on tmpfs in RAM
# it seems that a workaround could be modifying the bootable stick to contain a swap partition to extend the tmpfs storage
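# (sketch, untested assumption) from the running installer that could look like:
#   mkswap /dev/sdb2 && swapon /dev/sdb2   # /dev/sdb2 is a placeholder spare partition on a second stick
# with swap available, the tmpfs pages backing the writable /nix/store overlay can be evicted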
```
# Testing Disko
@@ -21,3 +49,18 @@ Testing disko partitioning is working quite well. Just run the following and che
```bash
nix run -L .\#nixosConfigurations.fanny.config.system.build.vmWithDisko
```
The only problem is that encryption is not working, so it needs to be commented out. For testing host fanny the following parts in `./machines/modules/disko/fanny.nix` need to be commented out (for both pools!):
```nix
datasets = {
  encrypted = {
    options = {
      encryption = "aes-256-gcm"; #THIS ONE
      keyformat = "passphrase"; #THIS ONE
      keylocation = "file:///tmp/root.key"; #THIS ONE
    };
    # use this to read the key during boot
    postCreateHook = '' #THIS ONE
      zfs set keylocation="prompt" "zroot/$name"; #THIS ONE
    ''; #THIS ONE
```

View File

@@ -1,11 +1 @@
# Updates
## Nextcloud
Update nextcloud to a new major version:
- create state directories: `mkdir /tmp/var /tmp/data`
- run the vm with state dirs to initialize the state: `sudo run-vm nextcloud --dummy-secrets --networking --var /tmp/var --data /tmp/data`
- update the lock file: `nix flake update --commit-lock-file`
- change services.nextcloud.package to the next version (do not skip major version upgrades, see the sketch below)
- change the custom `extraApps` pins to the new version
- TEST!
- run the vm again, it should successfully upgrade nextcloud from the old to the new version
- `sudo run-vm nextcloud --dummy-secrets --networking --var /tmp/var --data /tmp/data`
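The version bump itself is a small change in the Nextcloud module (sketch; the exact package attribute depends on the release you are moving to):
```nix
# move one major version at a time, e.g. nextcloud30 -> nextcloud31
services.nextcloud.package = pkgs.nextcloud31;
# custom extraApps fetched via pkgs.fetchNextcloudApp usually need a new
# release url and sha256 matching the new server version
```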

flake.lock generated
View File

@@ -67,11 +67,11 @@
]
},
"locked": {
"lastModified": 1744117652,
"narHash": "sha256-t7dFCDl4vIOOUMhEZnJF15aAzkpaup9x4ZRGToDFYWI=",
"lastModified": 1736373539,
"narHash": "sha256-dinzAqCjenWDxuy+MqUQq0I4zUSfaCvN9rzuCmgMZJY=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "b4e98224ad1336751a2ac7493967a4c9f6d9cb3f",
"rev": "bd65bc3cde04c16755955630b344bc9e35272c56",
"type": "github"
},
"original": {
@@ -109,11 +109,11 @@
"spectrum": "spectrum"
},
"locked": {
"lastModified": 1743083165,
"narHash": "sha256-Fz7AiCJWtoWZ2guJwO3B1h3RuJxYWaCzFIqY0Kmkyrs=",
"lastModified": 1739104176,
"narHash": "sha256-bNvtud2PUcbYM0i5Uq1v01Dcgq7RuhVKfjaSKkW2KRI=",
"owner": "astro",
"repo": "microvm.nix",
"rev": "773d5a04e2e10ca7b412270dea11276a496e1b61",
"rev": "d3a9b7504d420a1ffd7c83c1bb8fe57deaf939d2",
"type": "github"
},
"original": {
@@ -145,11 +145,11 @@
]
},
"locked": {
"lastModified": 1742568034,
"narHash": "sha256-QaMEhcnscfF2MqB7flZr+sLJMMYZPnvqO4NYf9B4G38=",
"lastModified": 1737057290,
"narHash": "sha256-3Pe0yKlCc7EOeq1X/aJVDH0CtNL+tIBm49vpepwL1MQ=",
"owner": "nix-community",
"repo": "nixos-generators",
"rev": "42ee229088490e3777ed7d1162cb9e9d8c3dbb11",
"rev": "d002ce9b6e7eb467cd1c6bb9aef9c35d191b5453",
"type": "github"
},
"original": {
@@ -160,11 +160,11 @@
},
"nixos-hardware": {
"locked": {
"lastModified": 1744366945,
"narHash": "sha256-OuLhysErPHl53BBifhesrRumJNhrlSgQDfYOTXfgIMg=",
"lastModified": 1738816619,
"narHash": "sha256-5yRlg48XmpcX5b5HesdGMOte+YuCy9rzQkJz+imcu6I=",
"owner": "NixOS",
"repo": "nixos-hardware",
"rev": "1fe3cc2bc5d2dc9c81cb4e63d2f67c1543340df1",
"rev": "2eccff41bab80839b1d25b303b53d339fbb07087",
"type": "github"
},
"original": {
@@ -192,11 +192,11 @@
},
"nixpkgs-unstable": {
"locked": {
"lastModified": 1744232761,
"narHash": "sha256-gbl9hE39nQRpZaLjhWKmEu5ejtQsgI5TWYrIVVJn30U=",
"lastModified": 1739020877,
"narHash": "sha256-mIvECo/NNdJJ/bXjNqIh8yeoSjVLAuDuTUzAo7dzs8Y=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f675531bc7e6657c10a18b565cfebd8aa9e24c14",
"rev": "a79cfe0ebd24952b580b1cf08cd906354996d547",
"type": "github"
},
"original": {
@@ -208,11 +208,11 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1744309437,
"narHash": "sha256-QZnNHM823am8apCqKSPdtnzPGTy2ZB4zIXOVoBp5+W0=",
"lastModified": 1739206421,
"narHash": "sha256-PwQASeL2cGVmrtQYlrBur0U20Xy07uSWVnFup2PHnDs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f9ebe33a928b5d529c895202263a5ce46bdf12f7",
"rev": "44534bc021b85c8d78e465021e21f33b856e2540",
"type": "github"
},
"original": {
@@ -245,11 +245,11 @@
]
},
"locked": {
"lastModified": 1744103455,
"narHash": "sha256-SR6+qjkPjGQG+8eM4dCcVtss8r9bre/LAxFMPJpaZeU=",
"lastModified": 1739262228,
"narHash": "sha256-7JAGezJ0Dn5qIyA2+T4Dt/xQgAbhCglh6lzCekTVMeU=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "69d5a5a4635c27dae5a742f36108beccc506c1ba",
"rev": "07af005bb7d60c7f118d9d9f5530485da5d1e975",
"type": "github"
},
"original": {
@@ -341,11 +341,11 @@
]
},
"locked": {
"lastModified": 1743458889,
"narHash": "sha256-eVTtsCPio3Wj/g/gvKTsyjh90vrNsmgjzXK9jMfcboM=",
"lastModified": 1737548421,
"narHash": "sha256-gmlqJdC+v86vXc2yMhiza1mvsqh3vMfrEsiw+tV5MXg=",
"ref": "refs/heads/master",
"rev": "b61466549e2687628516aa1f9ba73f251935773a",
"revCount": 30,
"rev": "c5fff78c83959841ac724980a13597dcfa6dc26d",
"revCount": 29,
"type": "git",
"url": "https://git.dynamicdiscord.de/kalipso/tasklist"
},

View File

@@ -18,7 +18,6 @@ in
inputs.self.nixosModules.malobeo.microvm
inputs.self.nixosModules.malobeo.metrics
inputs.self.nixosModules.malobeo.users
inputs.self.nixosModules.malobeo.backup
];
virtualisation.vmVariantWithDisko = {
@@ -43,11 +42,6 @@ in
cacheurl = "https://cache.dynamicdiscord.de";
};
malobeo.backup = {
enable = true;
snapshots = [ "storage/encrypted" "zroot/encrypted/var" ];
};
nix = {
settings.experimental-features = [ "nix-command" "flakes" ];
#always update microvms
@@ -59,7 +53,6 @@ in
malobeo.users = {
malobeo = true;
admin = true;
backup = true;
};
malobeo.disks = {

View File

@@ -1,4 +1,4 @@
{ config, pkgs, inputs, ... }:
{ config, pkgs, ... }:
{
imports =
@@ -9,7 +9,6 @@
../modules/sshd.nix
../modules/minimal_tools.nix
../modules/autoupdate.nix
inputs.self.nixosModules.malobeo.printing
];
malobeo.autoUpdate = {
@@ -51,8 +50,6 @@
};
services.printing.enable = true;
services.malobeo.printing.enable = true;
services.printing.drivers = [
(pkgs.writeTextDir "share/cups/model/brother5350.ppd" (builtins.readFile ../modules/BR5350_2_GPL.ppd))
pkgs.gutenprint

File diff suppressed because it is too large

View File

@@ -133,13 +133,6 @@ rec {
mountPoint = "/var";
tag = "var";
}
] ++ pkgs.lib.optionals (options.dataPath != "") [
{
source = "${options.dataPath}";
securityModel = "mapped";
mountPoint = "/data";
tag = "data";
}
]);
interfaces = pkgs.lib.mkIf (!options.withNetworking) (pkgs.lib.mkForce [{
@@ -202,7 +195,8 @@ rec {
vmNestedMicroVMOverwrites = host: sopsDummy: {
microvm.vms = pkgs.lib.mkForce (
services.malobeo.microvm.deployHosts = pkgs.lib.mkForce [];
microvm.vms =
let
# Map the values to each hostname to then generate an Attrset using listToAttrs
mapperFunc = name: { inherit name; value = {
@@ -216,22 +210,20 @@ rec {
(vmMicroVMOverwrites name {
withNetworking = true;
varPath = "";
dataPath = "";
writableStore = false; })
(if sopsDummy then (vmSopsOverwrites name) else {})
]);
};
}; };
in
builtins.listToAttrs (map mapperFunc self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts));
builtins.listToAttrs (map mapperFunc self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts);
};
buildVM = host: networking: sopsDummy: disableDisko: varPath: dataPath: writableStore: fwdPort: (self.nixosConfigurations.${host}.extendModules {
buildVM = host: networking: sopsDummy: disableDisko: varPath: writableStore: fwdPort: (self.nixosConfigurations.${host}.extendModules {
modules = [
(vmMicroVMOverwrites host {
withNetworking = networking;
varPath = "${varPath}";
dataPath = "${dataPath}";
writableStore = writableStore;
fwdPort = fwdPort; })
(if sopsDummy then (vmSopsOverwrites host) else {})

View File

@@ -1,102 +0,0 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.malobeo.backup;
hostToCommand = (hostname: datasetNames:
(map (dataset: {
name = "${hostname}_${dataset.sourceDataset}";
value = {
inherit hostname;
inherit (dataset) sourceDataset targetDataset;
};
} ) datasetNames));
peers = import ./peers.nix;
enableSnapshots = cfg.snapshots != null;
enableBackups = cfg.hosts != null;
in
{
options.malobeo.backup = {
enable = mkOption {
type = types.bool;
default = false;
description = "Enable sanoid/syncoid based backup functionality";
};
snapshots = mkOption {
type = types.nullOr (types.listOf types.str);
default = null;
description = "Automatic snapshots will be created for the given datasets";
};
hosts = mkOption {
default = null;
type = types.nullOr (types.attrsOf (types.listOf (types.submodule {
options = {
sourceDataset = mkOption {
type = types.str;
description = "The source that needs to be backed up";
};
targetDataset = mkOption {
type = types.str;
description = "The target dataset where the backup should be stored";
};
};
})));
description = ''
Hostname with list of datasets to backup. This option should be defined on hosts that will store backups.
It is necessary to add the machines that get backed up to known hosts.
This can be done for example systemwide using
programs.ssh.knownHosts."10.100.0.101" = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHqp2/YiiIhai7wyScGZJ20gtrzY+lp4N/8unyRs4qhc";
Or set it for the syncoid user directly.
'';
};
sshKey = mkOption {
default = null;
type = types.nullOr types.str;
description = "Set path to ssh key used for pull backups. Otherwise default key is used";
};
};
config = mkIf (cfg.enable) {
services.sanoid = mkIf (enableSnapshots) {
enable = true;
templates."default" = {
hourly = 24;
daily = 30; #keep 30 daily snapshots
monthly = 6; #keep 6 monthly backups
yearly = 0;
autosnap = true; #take snapshots automatically
autoprune = true; #delete old snapshots
};
datasets = builtins.listToAttrs (map (name: { inherit name; value = {
useTemplate = [ "default" ];
recursive = true;
}; }) cfg.snapshots);
};
services.syncoid = mkIf (enableBackups) {
enable = true;
sshKey = cfg.sshKey;
commonArgs = [
"--no-sync-snap"
];
interval = "*-*-* 04:15:00";
commands = builtins.mapAttrs (name: value: {
source = "backup@${peers.${value.hostname}.address}:${value.sourceDataset}";
target = "${value.targetDataset}";
sendOptions = "w";
recvOptions = "\"\"";
recursive = true;
})(builtins.listToAttrs (builtins.concatLists (builtins.attrValues (builtins.mapAttrs hostToCommand cfg.hosts))));
};
};
}
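For reference, a host that stores backups would consume this module roughly as follows (a sketch assembled from the options above and the snapshot usage shown in the host diff; the dataset names under hosts are placeholders):
```nix
malobeo.backup = {
  enable = true;
  # create automatic sanoid snapshots for local datasets
  snapshots = [ "zroot/encrypted/var" ];
  # pull backups from peers via syncoid (keys in hosts must match names in peers.nix)
  hosts = {
    fanny = [
      { sourceDataset = "zroot/encrypted/var"; targetDataset = "storage/backups/fanny/var"; }
    ];
  };
};
```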

View File

@@ -102,22 +102,6 @@ in
/run/current-system/sw/bin/microvm -Ru ${name}
'';
};
"microvm-init-dirs@${name}" = {
description = "Initialize microvm directories";
after = [ "zfs-mount.service" ];
wantedBy = [ "microvm@${name}.service" ];
unitConfig.ConditionPathExists = "!/var/lib/microvms/${name}/.is_initialized";
serviceConfig = {
Type = "oneshot";
};
script = ''
mkdir -p /var/lib/microvms/${name}/var
mkdir -p /var/lib/microvms/${name}/etc
mkdir -p /var/lib/microvms/data/${name}
touch /var/lib/microvms/${name}/.is_initialized
'';
};
}) {} (cfg.deployHosts);
systemd.timers = builtins.foldl' (timers: name: timers // {

View File

@@ -2,7 +2,7 @@
"vpn" = {
role = "server";
publicIp = "5.9.153.217";
address = "10.100.0.1";
address = [ "10.100.0.1/24" ];
allowedIPs = [ "10.100.0.0/24" ];
listenPort = 51821;
publicKey = "hF9H10Y8Ar7zvZXFoNM8LSoaYFgPCXv30c54SSEucX4=";
@@ -11,43 +11,36 @@
"celine" = {
role = "client";
address = "10.100.0.2";
address = [ "10.100.0.2/24" ];
allowedIPs = [ "10.100.0.2/32" ];
publicKey = "Jgx82tSOmZJS4sm1o8Eci9ahaQdQir2PLq9dBqsWZw4=";
};
"desktop" = {
role = "client";
address = "10.100.0.3";
address = [ "10.100.0.3/24" ];
allowedIPs = [ "10.100.0.3/32" ];
publicKey = "FtY2lcdWcw+nvtydOOUDyaeh/xkaqHA8y9GXzqU0Am0=";
};
"atlan-pc" = {
role = "client";
address = "10.100.0.5";
address = [ "10.100.0.5/24" ];
allowedIPs = [ "10.100.0.5/32" ];
publicKey = "TrJ4UAF//zXdaLwZudI78L+rTC36zEDodTDOWNS4Y1Y=";
};
"hetzner" = {
role = "client";
address = "10.100.0.6";
address = [ "10.100.0.6/24" ];
allowedIPs = [ "10.100.0.6/32" ];
publicKey = "csRzgwtnzmSLeLkSwTwEOrdKq55UOxZacR5D3GopCTQ=";
};
"fanny" = {
role = "client";
address = "10.100.0.101";
address = [ "10.100.0.101/24" ];
allowedIPs = [ "10.100.0.101/32" ];
publicKey = "3U59F6T1s/1LaZBIa6wB0qsVuO6pRR9jfYZJIH2piAU=";
};
"backup0" = {
role = "client";
address = "10.100.0.20";
allowedIPs = [ "10.100.0.20/32" ];
publicKey = "Pp55Jg//jREzHdbbIqTXc9N7rnLZIFw904qh6NLrACE=";
};
}

View File

@@ -1,51 +0,0 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.malobeo.printing;
driverFile = pkgs.writeTextDir "share/cups/model/konicaminoltac258.ppd" (builtins.readFile ../KOC658UX.ppd);
defaultPpdOptions = {
PageSize = "A4";
SelectColor = "Grayscale";
Finisher = "FS534";
SaddleUnit = "SD511";
Model = "C258";
InputSlot = "Tray1";
};
in
{
options.services.malobeo.printing = {
enable = mkOption {
type = types.bool;
default = false;
description = "Setup malobeo printers";
};
};
config = mkIf (cfg.enable) {
services.printing.enable = true;
services.printing.drivers = [
driverFile
];
hardware.printers.ensurePrinters = [ {
name = "KonicaDefault";
model = "konicaminoltac258.ppd";
location = "Zine Workshop";
deviceUri = "ipp://192.168.1.42/ipp";
ppdOptions = defaultPpdOptions;
}
{
name = "KonicaBooklet";
model = "konicaminoltac258.ppd";
location = "Zine Workshop";
deviceUri = "ipp://192.168.1.42/ipp";
ppdOptions = defaultPpdOptions // {
Fold = "Stitch";
Staple = "None";
};
}
];
};
}
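A host consumes this module the way the host configuration diff above shows; in short (sketch):
```nix
# pull the module in from the flake and switch it on
imports = [ inputs.self.nixosModules.malobeo.printing ];
services.malobeo.printing.enable = true;
```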

View File

@@ -68,11 +68,7 @@ in
users = [ "backup" ];
commands = [
{
command = "/run/current-system/sw/bin/zfs";
options = [ "NOPASSWD" ];
}
{
command = "/run/current-system/sw/bin/zpool";
command = "${pkgs.zfs-user}/bin/zfs";
options = [ "NOPASSWD" ];
}
];
@@ -98,4 +94,4 @@ in
];
}
];
}
}

View File

@@ -70,7 +70,7 @@ in
interfaces = {
malovpn = {
mtu = 1340; #seems to be necessary to proxypass nginx traffic through vpn
address = [ "${myPeer.address}/24" ];
address = myPeer.address;
autostart = cfg.autostart;
listenPort = mkIf (myPeer.role == "server") myPeer.listenPort;

View File

@@ -33,7 +33,7 @@ with lib;
services.nextcloud = {
enable = true;
package = pkgs.nextcloud31;
package = pkgs.nextcloud30;
hostName = "cloud.malobeo.org";
config.adminpassFile = config.sops.secrets.nextcloudAdminPass.path;
#https = true; #disable for testing
@@ -47,22 +47,21 @@ with lib;
};
extraAppsEnable = true;
extraApps = {
inherit (config.services.nextcloud.package.packages.apps) contacts calendar deck polls registration collectives forms;
appointments = pkgs.fetchNextcloudApp {
sha256 = "sha256-ls1rLnsX7U9wo2WkEtzhrvliTcWUl6LWXolE/9etJ78=";
url = "https://github.com/SergeyMosin/Appointments/raw/refs/tags/v2.4.3/build/artifacts/appstore/appointments.tar.gz";
inherit (config.services.nextcloud.package.packages.apps) contacts calendar deck polls;
collectives = pkgs.fetchNextcloudApp {
sha256 = "sha256-cj/8FhzxOACJaUEu0eG9r7iAQmnOG62yFHeyUICalFY=";
url = "https://github.com/nextcloud/collectives/releases/download/v2.15.2/collectives-2.15.2.tar.gz";
license = "agpl3Plus";
};
};
settings = {
trusted_domains = ["10.0.0.13"];
trusted_proxies = [ "10.0.0.1" ];
"maintenance_window_start" = "1";
"default_phone_region" = "DE";
};
phpOptions = {
"realpath_cache_size" = "0";
"opcache.interned_strings_buffer" = "32";
"opcache.interned_strings_buffer" = "23";
};
};

View File

@@ -116,8 +116,6 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
metrics.imports = [ ./machines/modules/malobeo/metrics.nix ];
disko.imports = [ ./machines/modules/disko ];
users.imports = [ ./machines/modules/malobeo/users.nix ];
backup.imports = [ ./machines/modules/malobeo/backup.nix ];
printing.imports = [ ./machines/modules/malobeo/printing.nix ];
};
hydraJobs = nixpkgs.lib.mapAttrs (_: nixpkgs.lib.hydraJob) (

View File

@@ -6,7 +6,6 @@ usage() {
echo "--no-disko disable disko and initrd secrets. needed for real hosts like fanny"
echo "--writable-store enables writable store. necessary for host with nested imperative microvms like fanny"
echo "--var path to directory that should be shared as /var. may require root otherwise some systemd units fail within vm. if dir is empty vm will populate"
echo "--data path to directory that should be shared as /data"
echo "--fwd-port forwards the given port to port 80 on vm"
exit 1
}
@@ -24,7 +23,6 @@ DUMMY_SECRETS=false
NO_DISKO=false
RW_STORE=false
VAR_PATH=""
DATA_PATH=""
FWD_PORT=0
# check args
@@ -44,15 +42,6 @@ while [[ "$#" -gt 0 ]]; do
usage
fi
;;
--data)
if [[ -n "$2" && ! "$2" =~ ^- ]]; then
DATA_PATH="$2"
shift
else
echo "Error: --data requires a non-empty string argument."
usage
fi
;;
--fwd-port)
if [[ -n "$2" && ! "$2" =~ ^- ]]; then
FWD_PORT="$2"
@@ -75,8 +64,4 @@ if [ -n "$VAR_PATH" ]; then
echo "sharing var directory: $VAR_PATH"
fi
if [ -n "$DATA_PATH" ]; then
echo "sharing data directory: $DATA_PATH"
fi
nix run --show-trace --impure --expr "((builtins.getFlake \"$(pwd)\").vmBuilder.x86_64-linux \"$HOSTNAME\" $NETWORK $DUMMY_SECRETS $NO_DISKO \"$VAR_PATH\" \"$DATA_PATH\" $RW_STORE $FWD_PORT).config.microvm.declaredRunner"
nix run --show-trace --impure --expr "((builtins.getFlake \"$(pwd)\").vmBuilder.x86_64-linux \"$HOSTNAME\" $NETWORK $DUMMY_SECRETS $NO_DISKO \"$VAR_PATH\" $RW_STORE $FWD_PORT).config.microvm.declaredRunner"
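For reference, with the --data argument gone, the expression the script evaluates boils down to a seven-argument vmBuilder call; a sketch with placeholder values (hostname, path and port are examples, the argument order is taken from buildVM above):
```nix
# roughly: run-vm nextcloud --networking --dummy-secrets --var /tmp/var --fwd-port 8080
((builtins.getFlake "/path/to/infrastructure").vmBuilder.x86_64-linux
  "nextcloud" true true false "/tmp/var" false 8080)
  .config.microvm.declaredRunner
```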