Compare commits: e4f6cf2595...printer-mo (28 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 80fc4cc528 |  |
|  | 8b37082844 |  |
|  | 6c3a7be483 |  |
|  | 568cce0d48 |  |
|  | 46e9bae193 |  |
|  | 4949719307 |  |
|  | e8c188debf |  |
|  | 1f559d93ba |  |
|  | a03b7506c5 |  |
|  | 3b2a7cedc5 |  |
|  | a48e271853 |  |
|  | d202a3d0cb |  |
|  | ef33833910 |  |
|  | d73031e7f1 |  |
|  | be0bb0b08b |  |
|  | 026494c877 |  |
|  | 3021716640 |  |
|  | 70ec63f213 |  |
|  | 91d86c49a1 |  |
|  | 96dee29595 |  |
|  | d5e94b50cb |  |
|  | 286e03c853 |  |
|  | 766b738a6a |  |
|  | de600fe7c7 |  |
|  | 5731fc795e |  |
|  | 1083949c87 |  |
|  | 413202e940 |  |
|  | ec20c80251 |  |
@@ -1,47 +1,19 @@
-# Create host with disko-install
-
-How to use disko-install is described here: https://github.com/nix-community/disko/blob/master/docs/disko-install.md
-
----
-
-Here are the exact steps to get bakunin running:
-
-First create machines/hostname/configuration.nix
-
-Add hosts nixosConfiguration in machines/configurations.nix
-
-Boot nixos installer on the Machine.
+# Create host with nixos-anywhere
+
+We use a nixos-anywhere wrapper script to deploy new hosts.
+
+The wrapper script takes care of copying persistent host keys before calling nixos-anywhere.
+
+To accomplish that boot the host from a nixos image and setup a root password.

 ``` bash
-# establish network connection
-wpa_passphrase "network" "password" > wpa.conf
-wpa_supplicant -B -i wlp3s0 -c wpa.conf
-ping 8.8.8.8
-# if that works continue
-
-# generate a base hardware config
-nixos-generate-config --root /tmp/config --no-filesystems
-
-# get the infra repo
-nix-shell -p git
-git clone https://git.dynamicdiscord.de/kalipso/infrastructure
-cd infrastructure
-
-# add the new generated hardware config (and import in hosts configuration.nix)
-cp /tmp/config/etc/nixos/hardware-configuration.nix machines/bakunin/
-
-# check which harddrive we want to install the system on
-lsblk #choose harddrive, in this case /dev/sda
-
-# run nixos-install on that harddrive
-sudo nix --extra-experimental-features flakes --extra-experimental-features nix-command run 'github:nix-community/disko/latest#disko-install' -- --flake .#bakunin --disk main /dev/sda
-
-# this failed with out of memory
-# running again showed: no disk left on device
-# it seems the usb stick i used for flashing is way to small
-# it is only
-# with a bigger one (more than 8 gig i guess) it should work
-# instead the disko-install tool i try the old method - first partitioning using disko and then installing the system
-# for that i needed to adjust ./machines/modules/disko/btrfs-laptop.nix and set the disk to "/dev/sda"
-
-sudo nix --extra-experimental-features "flakes nix-command" run 'github:nix-community/disko/latest' -- --mode format --flake .#bakunin
-
-# failed with no space left on device.
-# problem is lots of data is written to the local /nix/store which is mounted on tmpfs in ram
-# it seems that a workaround could be modifying the bootable stick to contain a swap partition to extend tmpfs storage
+sudo su
+passwd
+```
+
+After that get the hosts ip using `ip a` and start deployment from your own machine:
+
+``` bash
+# from infrastrucutre repository root dir:
+nix develop .#
+remote-install hostname 10.0.42.23
 ```

 # Testing Disko
@@ -49,18 +21,3 @@ Testing disko partitioning is working quite well. Just run the following and che
 ```bash
 nix run -L .\#nixosConfigurations.fanny.config.system.build.vmWithDisko
 ```

-Only problem is that encryption is not working, so it needs to be commented out. For testing host fanny the following parts in ```./machines/modules/disko/fanny.nix``` need to be commented out(for both pools!):
-```nix
-datasets = {
-encrypted = {
-options = {
-encryption = "aes-256-gcm"; #THIS ONE
-keyformat = "passphrase"; #THIS ONE
-keylocation = "file:///tmp/root.key"; #THIS ONE
-};
-# use this to read the key during boot
-postCreateHook = '' #THIS ONE
-zfs set keylocation="prompt" "zroot/$name"; #THIS ONE
-''; #THIS ONE
-```
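The `remote-install` wrapper referenced in the new README is not part of this diff. For orientation only, a wrapper that pre-seeds persistent host keys could look roughly like the sketch below. It assumes nixos-anywhere's `--flake` and `--extra-files` options; the `secrets/<host>/...` path, the package wiring, and the script name are illustrative, not taken from this repository.

```nix
# Hypothetical sketch, not the repository's actual remote-install script.
# Assumes nixos-anywhere's --flake and --extra-files options; paths are illustrative.
{ pkgs, nixos-anywhere }:

pkgs.writeShellApplication {
  name = "remote-install";
  runtimeInputs = [ nixos-anywhere ];
  text = ''
    host="$1"    # e.g. "hostname"
    target="$2"  # e.g. "10.0.42.23"

    # Stage the persistent SSH host key so the freshly installed system
    # keeps its existing identity (the "copying persistent host keys" step).
    extra="$(mktemp -d)"
    install -D -m 600 "secrets/$host/ssh_host_ed25519_key" \
      "$extra/etc/ssh/ssh_host_ed25519_key"

    nixos-anywhere --flake ".#$host" --extra-files "$extra" "root@$target"
  '';
}
```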
flake.lock (generated, 8 changed lines)

@@ -341,11 +341,11 @@
 ]
 },
 "locked": {
-"lastModified": 1737548421,
-"narHash": "sha256-gmlqJdC+v86vXc2yMhiza1mvsqh3vMfrEsiw+tV5MXg=",
+"lastModified": 1743458889,
+"narHash": "sha256-eVTtsCPio3Wj/g/gvKTsyjh90vrNsmgjzXK9jMfcboM=",
 "ref": "refs/heads/master",
-"rev": "c5fff78c83959841ac724980a13597dcfa6dc26d",
-"revCount": 29,
+"rev": "b61466549e2687628516aa1f9ba73f251935773a",
+"revCount": 30,
 "type": "git",
 "url": "https://git.dynamicdiscord.de/kalipso/tasklist"
 },
@@ -18,6 +18,7 @@ in
 inputs.self.nixosModules.malobeo.microvm
 inputs.self.nixosModules.malobeo.metrics
 inputs.self.nixosModules.malobeo.users
+inputs.self.nixosModules.malobeo.backup
 ];

 virtualisation.vmVariantWithDisko = {
@@ -42,6 +43,11 @@ in
 cacheurl = "https://cache.dynamicdiscord.de";
 };

+malobeo.backup = {
+enable = true;
+snapshots = [ "storage/encrypted" "zroot/encrypted/var" ];
+};
+
 nix = {
 settings.experimental-features = [ "nix-command" "flakes" ];
 #always update microvms
@@ -53,6 +59,7 @@ in
 malobeo.users = {
 malobeo = true;
 admin = true;
+backup = true;
 };

 malobeo.disks = {
@@ -1,4 +1,4 @@
-{ config, pkgs, ... }:
+{ config, pkgs, inputs, ... }:

 {
 imports =
@@ -9,6 +9,7 @@
 ../modules/sshd.nix
 ../modules/minimal_tools.nix
 ../modules/autoupdate.nix
+inputs.self.nixosModules.malobeo.printing
 ];

 malobeo.autoUpdate = {
@@ -50,6 +51,8 @@
 };

 services.printing.enable = true;
+services.malobeo.printing.enable = true;
+
 services.printing.drivers = [
 (pkgs.writeTextDir "share/cups/model/brother5350.ppd" (builtins.readFile ../modules/BR5350_2_GPL.ppd))
 pkgs.gutenprint
machines/modules/KOC658UX.ppd (new file, 4777 lines)
File diff suppressed because it is too large.
@@ -195,8 +195,7 @@ rec {

 vmNestedMicroVMOverwrites = host: sopsDummy: {

-services.malobeo.microvm.deployHosts = pkgs.lib.mkForce [];
-microvm.vms =
+microvm.vms = pkgs.lib.mkForce (
 let
 # Map the values to each hostname to then generate an Attrset using listToAttrs
 mapperFunc = name: { inherit name; value = {
@@ -216,7 +215,7 @@
 };
 }; };
 in
-builtins.listToAttrs (map mapperFunc self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts);
+builtins.listToAttrs (map mapperFunc self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts));
 };

 buildVM = host: networking: sopsDummy: disableDisko: varPath: writableStore: fwdPort: (self.nixosConfigurations.${host}.extendModules {
machines/modules/malobeo/backup.nix (new file, 102 lines)

@@ -0,0 +1,102 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+cfg = config.malobeo.backup;
+hostToCommand = (hostname: datasetNames:
+(map (dataset: {
+name = "${hostname}_${dataset.sourceDataset}";
+value = {
+inherit hostname;
+inherit (dataset) sourceDataset targetDataset;
+};
+} ) datasetNames));
+peers = import ./peers.nix;
+
+enableSnapshots = cfg.snapshots != null;
+enableBackups = cfg.hosts != null;
+in
+{
+options.malobeo.backup = {
+enable = mkOption {
+type = types.bool;
+default = false;
+description = "Enable sanoid/syncoid based backup functionality";
+};
+
+snapshots = mkOption {
+type = types.nullOr (types.listOf types.str);
+default = null;
+description = "Automatic snapshots will be created for the given datasets";
+};
+
+hosts = mkOption {
+default = null;
+type = types.nullOr (types.attrsOf (types.listOf (types.submodule {
+options = {
+sourceDataset = mkOption {
+type = types.str;
+description = "The source that needs to be backed up";
+};
+targetDataset = mkOption {
+type = types.str;
+description = "The target dataset where the backup should be stored";
+};
+};
+})));
+description = ''
+Hostname with list of datasets to backup. This option should be defined on hosts that will store backups.
+
+It is necessary to add the machines that get backed up to known hosts.
+This can be done for example systemwide using
+programs.ssh.knownHosts."10.100.0.101" = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHqp2/YiiIhai7wyScGZJ20gtrzY+lp4N/8unyRs4qhc";
+Or set it for the syncoid user directly.
+'';
+};
+
+sshKey = mkOption {
+default = null;
+type = types.nullOr types.str;
+description = "Set path to ssh key used for pull backups. Otherwise default key is used";
+};
+};
+
+config = mkIf (cfg.enable) {
+services.sanoid = mkIf (enableSnapshots) {
+enable = true;
+
+templates."default" = {
+hourly = 24;
+daily = 30; #keep 30 daily snapshots
+monthly = 6; #keep 6 monthly backups
+yearly = 0;
+
+autosnap = true; #take snapshots automatically
+autoprune = true; #delete old snapshots
+};
+
+datasets = builtins.listToAttrs (map (name: { inherit name; value = {
+useTemplate = [ "default" ];
+recursive = true;
+}; }) cfg.snapshots);
+};
+
+services.syncoid = mkIf (enableBackups) {
+enable = true;
+sshKey = cfg.sshKey;
+
+commonArgs = [
+"--no-sync-snap"
+];
+
+interval = "*-*-* 04:15:00";
+
+commands = builtins.mapAttrs (name: value: {
+source = "backup@${peers.${value.hostname}.address}:${value.sourceDataset}";
+target = "${value.targetDataset}";
+sendOptions = "w";
+recvOptions = "\"\"";
+recursive = true;
+})(builtins.listToAttrs (builtins.concatLists (builtins.attrValues (builtins.mapAttrs hostToCommand cfg.hosts))));
+};
+};
+}
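This module is consumed in two places in this change: the host configuration hunk earlier in the diff enables it with `snapshots = [ "storage/encrypted" "zroot/encrypted/var" ]`, and a backup-receiving machine would set the `hosts` option. Below is a minimal sketch of the receiving side, assuming an illustrative target pool named `backuppool`; only `fanny`, its dataset, and the known-hosts key are taken from this diff.

```nix
# Sketch of a backup-storing host (e.g. the new backup0 peer); the target
# dataset "backuppool/fanny/var" is an assumption, the rest mirrors the
# option definitions above and values found elsewhere in this diff.
{
  malobeo.backup = {
    enable = true;
    # Attribute names are peer hostnames from peers.nix; syncoid pulls
    # backup@<peer address>:<sourceDataset> into <targetDataset> daily at 04:15.
    hosts.fanny = [
      {
        sourceDataset = "zroot/encrypted/var";
        targetDataset = "backuppool/fanny/var";
      }
    ];
  };

  # Machines that get backed up must be known hosts for the syncoid user:
  programs.ssh.knownHosts."10.100.0.101".publicKey =
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHqp2/YiiIhai7wyScGZJ20gtrzY+lp4N/8unyRs4qhc";
}
```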
@@ -102,6 +102,22 @@ in
 /run/current-system/sw/bin/microvm -Ru ${name}
 '';
 };
+
+"microvm-init-dirs@${name}" = {
+description = "Initialize microvm directories";
+after = [ "zfs-mount.service" ];
+wantedBy = [ "microvm@${name}.service" ];
+unitConfig.ConditionPathExists = "!/var/lib/microvms/${name}/.is_initialized";
+serviceConfig = {
+Type = "oneshot";
+};
+script = ''
+mkdir -p /var/lib/microvms/${name}/var
+mkdir -p /var/lib/microvms/${name}/etc
+mkdir -p /var/lib/microvms/data/${name}
+touch /var/lib/microvms/${name}/.is_initialized
+'';
+};
 }) {} (cfg.deployHosts);

 systemd.timers = builtins.foldl' (timers: name: timers // {
@@ -2,7 +2,7 @@
 "vpn" = {
 role = "server";
 publicIp = "5.9.153.217";
-address = [ "10.100.0.1/24" ];
+address = "10.100.0.1";
 allowedIPs = [ "10.100.0.0/24" ];
 listenPort = 51821;
 publicKey = "hF9H10Y8Ar7zvZXFoNM8LSoaYFgPCXv30c54SSEucX4=";
@@ -11,36 +11,43 @@

 "celine" = {
 role = "client";
-address = [ "10.100.0.2/24" ];
+address = "10.100.0.2";
 allowedIPs = [ "10.100.0.2/32" ];
 publicKey = "Jgx82tSOmZJS4sm1o8Eci9ahaQdQir2PLq9dBqsWZw4=";
 };

 "desktop" = {
 role = "client";
-address = [ "10.100.0.3/24" ];
+address = "10.100.0.3";
 allowedIPs = [ "10.100.0.3/32" ];
 publicKey = "FtY2lcdWcw+nvtydOOUDyaeh/xkaqHA8y9GXzqU0Am0=";
 };

 "atlan-pc" = {
 role = "client";
-address = [ "10.100.0.5/24" ];
+address = "10.100.0.5";
 allowedIPs = [ "10.100.0.5/32" ];
 publicKey = "TrJ4UAF//zXdaLwZudI78L+rTC36zEDodTDOWNS4Y1Y=";
 };

 "hetzner" = {
 role = "client";
-address = [ "10.100.0.6/24" ];
+address = "10.100.0.6";
 allowedIPs = [ "10.100.0.6/32" ];
 publicKey = "csRzgwtnzmSLeLkSwTwEOrdKq55UOxZacR5D3GopCTQ=";
 };

 "fanny" = {
 role = "client";
-address = [ "10.100.0.101/24" ];
+address = "10.100.0.101";
 allowedIPs = [ "10.100.0.101/32" ];
 publicKey = "3U59F6T1s/1LaZBIa6wB0qsVuO6pRR9jfYZJIH2piAU=";
 };

+"backup0" = {
+role = "client";
+address = "10.100.0.20";
+allowedIPs = [ "10.100.0.20/32" ];
+publicKey = "Pp55Jg//jREzHdbbIqTXc9N7rnLZIFw904qh6NLrACE=";
+};
 }
machines/modules/malobeo/printing.nix (new file, 51 lines)

@@ -0,0 +1,51 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+cfg = config.services.malobeo.printing;
+driverFile = pkgs.writeTextDir "share/cups/model/konicaminoltac258.ppd" (builtins.readFile ../KOC658UX.ppd);
+
+defaultPpdOptions = {
+PageSize = "A4";
+SelectColor = "Grayscale";
+Finisher = "FS534";
+SaddleUnit = "SD511";
+Model = "C258";
+InputSlot = "Tray1";
+};
+
+in
+{
+options.services.malobeo.printing = {
+enable = mkOption {
+type = types.bool;
+default = false;
+description = "Setup malobeo printers";
+};
+};
+
+config = mkIf (cfg.enable) {
+services.printing.enable = true;
+services.printing.drivers = [
+driverFile
+];
+
+hardware.printers.ensurePrinters = [ {
+name = "KonicaDefault";
+model = "konicaminoltac258.ppd";
+location = "Zine Workshop";
+deviceUri = "ipp://192.168.1.42/ipp";
+ppdOptions = defaultPpdOptions;
+}
+{
+name = "KonicaBooklet";
+model = "konicaminoltac258.ppd";
+location = "Zine Workshop";
+deviceUri = "ipp://192.168.1.42/ipp";
+ppdOptions = defaultPpdOptions // {
+Fold = "Stitch";
+Staple = "None";
+};
+}
+];
+};
+}
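Consuming the module takes nothing beyond the module import and the enable switch; the configuration.nix and flake.nix hunks elsewhere in this diff do exactly that. Put together, a host opts in roughly like this:

```nix
# Mirrors the configuration.nix and flake.nix changes in this diff; nothing
# here is new beyond combining them into one snippet.
{ inputs, ... }:
{
  imports = [ inputs.self.nixosModules.malobeo.printing ];

  # Registers the KOC658UX PPD and declares the KonicaDefault and
  # KonicaBooklet queues at ipp://192.168.1.42/ipp.
  services.malobeo.printing.enable = true;
}
```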
@@ -68,7 +68,11 @@ in
 users = [ "backup" ];
 commands = [
 {
-command = "${pkgs.zfs-user}/bin/zfs";
+command = "/run/current-system/sw/bin/zfs";
+options = [ "NOPASSWD" ];
+}
+{
+command = "/run/current-system/sw/bin/zpool";
 options = [ "NOPASSWD" ];
 }
 ];
@@ -94,4 +98,4 @@ in
 ];
 }
 ];
 }
@@ -70,7 +70,7 @@ in
 interfaces = {
 malovpn = {
 mtu = 1340; #seems to be necessary to proxypass nginx traffic through vpn
-address = myPeer.address;
+address = [ "${myPeer.address}/24" ];
 autostart = cfg.autostart;
 listenPort = mkIf (myPeer.role == "server") myPeer.listenPort;
@@ -47,7 +47,7 @@ with lib;
 };
 extraAppsEnable = true;
 extraApps = {
-inherit (config.services.nextcloud.package.packages.apps) contacts calendar deck polls;
+inherit (config.services.nextcloud.package.packages.apps) contacts calendar deck polls registration;
 collectives = pkgs.fetchNextcloudApp {
 sha256 = "sha256-cj/8FhzxOACJaUEu0eG9r7iAQmnOG62yFHeyUICalFY=";
 url = "https://github.com/nextcloud/collectives/releases/download/v2.15.2/collectives-2.15.2.tar.gz";
@@ -56,6 +56,7 @@ with lib;
 };
 settings = {
 trusted_domains = ["10.0.0.13"];
+trusted_proxies = [ "10.0.0.1" ];
 "maintenance_window_start" = "1";
 "default_phone_region" = "DE";
 };
@@ -116,6 +116,8 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
 metrics.imports = [ ./machines/modules/malobeo/metrics.nix ];
 disko.imports = [ ./machines/modules/disko ];
 users.imports = [ ./machines/modules/malobeo/users.nix ];
+backup.imports = [ ./machines/modules/malobeo/backup.nix ];
+printing.imports = [ ./machines/modules/malobeo/printing.nix ];
 };

 hydraJobs = nixpkgs.lib.mapAttrs (_: nixpkgs.lib.hydraJob) (