32 Commits

Author  SHA1  Message  Date

eafe7a6b95  [vpn] update dummy secrets  2025-01-19 23:48:02 +01:00  (CI: flake-check passed in 8m32s)
fe9ff06aae  [lucia] init dummy secrets  2025-01-19 23:47:50 +01:00
ba6e219d64  [fanny] init dummy secrets  2025-01-19 23:47:41 +01:00
7431209bc2  [durruti] rm secrets.yaml - currently empty  2025-01-19 23:44:51 +01:00
015c326042  [nix] rm vm interface overwrite  2025-01-19 23:40:15 +01:00  (CI: flake-check failed after 6m22s)
5f780e17eb  [nextcloud] add dummy secrets  2025-01-19 23:39:51 +01:00
fda348f5da  [sops] add a dummy key to allow secret usage within test vms  2025-01-19 22:55:54 +01:00
36ec5f5837  [sops] test sharing hostkey with vm  2025-01-19 22:55:52 +01:00
68b3da7df8  [fanny] proxypass cloud.malobeo.org  2025-01-19 14:53:39 +01:00  (CI: flake-check passed in 6m3s)
affcc71eb1  [fanny] deploy nextcloud  2025-01-19 14:52:33 +01:00  (CI: flake-check cancelled)
4462856fa0  [nextcloud] rm obsolete nameserver  2025-01-19 14:52:20 +01:00
5352c1fa4d  [docs] make readme the index  2025-01-19 14:30:58 +01:00
            still most of it is quite out of date...
ahtlon  fabf48a5c0  [nextcloud] nextcloud works now  2025-01-19 14:22:08 +01:00  (CI: flake-check passed in 8m17s)
ahtlon  617c177892  [nextcloud] flake update because for some reason the sha changed  2025-01-19 14:22:08 +01:00
ahtlon  9b4cd02e53  [nextcloud] enable postgress, redis, change domain  2025-01-19 14:22:08 +01:00
ahtlon  fab1b18263  [nextcloud] rm discourse  2025-01-19 14:22:08 +01:00
cbd041f563  [nextcloud] fix hostname  2025-01-19 14:22:08 +01:00
ahtlon  ef25c686b4  add nextcloud collectives  2025-01-19 14:22:08 +01:00
ahtlon  66392ca2c2  login works ("login geht")  2025-01-19 14:22:08 +01:00
ahtlon  9afa8987e7  nextcloud minimal  2025-01-19 14:22:08 +01:00
ahtlon  0239733e62  sops....  2025-01-19 14:22:08 +01:00
ahtlon  d9cf3588bf  Start over but right this time  2025-01-19 14:22:08 +01:00
ahtlon  2500b8ab9a  basic discourse example  2025-01-19 14:22:08 +01:00
ahtlon  52824e39ee  with nix flake check the hydraJobs output is evaluated in the same way as Hydra's hydra-eval-jobs  2025-01-18 23:41:53 +01:00  (CI: flake-check passed in 13m21s)
ahtlon  8793120436  Only run on push  2025-01-18 23:40:11 +01:00
ahtlon  950ada1e10  [actions] Add flake check  2025-01-18 22:24:21 +01:00  (CI: eval-hydra-jobs passed in 4m33s; flake-check passed in 7m30s)
ahtlon  1e269966ff  Merge branch 'fix-flake-check'  2025-01-18 22:02:19 +01:00  (CI: eval-hydra-jobs passed in 5m54s)
            nix flake check and show now work again
ahtlon  3861daaf76  [modules] move microvm module import from makeMicroVM to baseModules  2025-01-18 22:01:06 +01:00
ahtlon  3a332e77d1  [scripts] move packages to legacyPackages  2025-01-18 21:45:48 +01:00
ahtlon  79c311b45d  Merge branch 'issue51'  2025-01-18 20:41:06 +01:00  (CI: eval-hydra-jobs passed in 6m3s)
            Fixes #51
ahtlon  850070f987  [scripts] check for flake.nix  2025-01-18 20:39:16 +01:00  (CI: eval-hydra-jobs passed in 4m15s)
ahtlon  d242562544  [packages] make scripts available in shell without nix run  2025-01-18 20:04:22 +01:00  (CI: eval-hydra-jobs passed in 4m7s)
16 changed files with 94 additions and 25697 deletions

File: README (documentation; exact path not shown)

@@ -12,31 +12,13 @@ Use durruti as orientation:
 "10.0.0.5" is the IP assigned to its tap interface.
 ### Testing MicroVMs locally
-MicroVMs can be built and run easily on your localhost for development.
-We provide the script ```run-vm``` to handle stuff like development (dummy) secrets, sharing directories, etc. easily.
-Usage examples:
+MicroVMs can be built and run easily on your local host, but they are not persistent!
+For durruti, for example, this is done by:
 ``` bash
-# run without args to get available options and usage info
-run-vm
-# run nextcloud locally with dummy secrets
-run-vm nextcloud --dummy-secrets
-# share a local folder as /var/lib dir so that nextcloud application data stays persistent between boots
-mkdir /tmp/nextcloud
-run-vm nextcloud --dummy-secrets --varlib /tmp/nextcloud
-# enable networking to provide connectivity between multiple vms
-# for that the malobeo hostBridge must be enabled on your host
-# this example deploys persistent grafana on overwatch and fetches metrics from infradocs
-mkdir /tmp/overwatch
-run-vm overwatch --networking --varlib /tmp/overwatch
-run-vm infradocs --networking
+nix run .\#durruti-vm
 ```
-### Testing persistent microvms
+### Fully deploy microvms on local host
 In order to test persistent microvms locally we need to create them using the ```microvm``` command.
 This is necessary to be able to mount persistent /etc and /var volumes on those hosts.
 Do the following:
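
The replacement workflow above relies on the per-host VM runners generated in the flake.nix diff at the bottom of this changeset. A minimal usage sketch: the `-withsops` output name is taken from that diff, and the `microvm` invocation assumes microvm.nix's standard CLI (flag spelling may differ between versions), so treat both as assumptions rather than documented interface.

``` bash
# Throwaway test VM; all state is lost on shutdown:
nix run .\#durruti-vm

# Same VM, but with the dummy sops key provisioned so secrets decrypt inside:
nix run .\#durruti-vm-withsops

# Persistent microvm with real /etc and /var volumes, via microvm.nix's CLI:
microvm -f . -c durruti
```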

File: machines/configuration.nix (inferred)

@@ -43,7 +43,6 @@ let
   defaultModules = baseModules;
   makeMicroVM = hostName: ipv4Addr: macAddr: modules: [
-    self.nixosModules.malobeo.metrics
     {
       microvm = {
         hypervisor = "cloud-hypervisor";
@@ -81,13 +80,6 @@ let
         ];
       };
-      malobeo.metrics = {
-        enable = true;
-        enablePromtail = true;
-        logNginx = false;
-        lokiHost = "10.0.0.14";
-      };
       systemd.network.enable = true;
       systemd.network.networks."20-lan" = {
@@ -187,19 +179,11 @@ in
     ];
   };
-  overwatch = nixosSystem {
-    system = "x86_64-linux";
-    specialArgs.inputs = inputs;
-    specialArgs.self = self;
-    modules = makeMicroVM "overwatch" "10.0.0.14" "D0:E5:CA:F0:D7:E0" [
-      ./overwatch/configuration.nix
-    ];
-  };
   testvm = nixosSystem {
     system = "x86_64-linux";
     specialArgs.inputs = inputs;
     specialArgs.self = self;
     modules = defaultModules ++ [ ./testvm ];
   };
 }

File: machines/durruti/configuration.nix (inferred)

@@ -6,6 +6,7 @@ with lib;
   networking = {
     hostName = mkDefault "durruti";
     useDHCP = false;
+    nameservers = [ "1.1.1.1" ];
   };
   networking.firewall.allowedTCPPorts = [ 8080 ];

File: machines/fanny/configuration.nix (inferred)

@@ -53,7 +53,7 @@ in
   };
   services.malobeo.microvm.enableHostBridge = true;
-  services.malobeo.microvm.deployHosts = [ "infradocs" "nextcloud" "overwatch" ];
+  services.malobeo.microvm.deployHosts = [ "infradocs" "nextcloud" ];
   networking = {
     firewall = {

File: machines/infradocs/configuration.nix (inferred)

@@ -6,6 +6,7 @@ with lib;
   networking = {
     hostName = mkDefault "infradocs";
     useDHCP = false;
+    nameservers = [ "1.1.1.1" ];
   };
   imports = [
@@ -14,12 +15,6 @@ with lib;
     ../modules/sshd.nix
   ];
-  networking.firewall.allowedTCPPorts = [ 9002 ];
-  malobeo.metrics.logNginx = lib.mkForce true;
-  users.users.promtail.extraGroups = [ "nginx" "systemd-journal" ];
   system.stateVersion = "22.11"; # Did you read the comment?
 }

File: machines/modules/malobeo/metrics.nix (deleted)

@@ -1,56 +0,0 @@
-{ config, lib, pkgs, ... }:
-let
-  cfg = config.malobeo.metrics;
-in
-{
-  options.malobeo.metrics = {
-    enable = lib.mkOption {
-      type = lib.types.bool;
-      default = false;
-      description = "Enable sharing metrics";
-    };
-    enablePromtail = lib.mkOption {
-      type = lib.types.bool;
-      default = true;
-      description = "Enable sharing logs";
-    };
-    logNginx = lib.mkOption {
-      type = lib.types.bool;
-      default = false;
-      description = "Share nginx logs";
-    };
-    lokiHost = lib.mkOption {
-      type = lib.types.str;
-      default = "10.0.0.14";
-      description = "Address of loki host";
-    };
-  };
-  config = lib.mkIf (cfg.enable) {
-    networking.firewall.allowedTCPPorts = [ 9002 ];
-    services.prometheus = {
-      exporters = {
-        node = {
-          enable = true;
-          enabledCollectors = [ "systemd" "processes" ];
-          port = 9002;
-        };
-      };
-    };
-    services.promtail = {
-      enable = cfg.enablePromtail;
-      configFile = import ./promtail_config.nix {
-        lokiAddress = cfg.lokiHost;
-        logNginx = cfg.logNginx;
-        config = config;
-        pkgs = pkgs;
-      };
-    };
-    users.users.promtail.extraGroups = [ "systemd-journal" ] ++ (lib.optionals cfg.logNginx [ "nginx" ]);
-  };
-}
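
For reference, this deleted module was consumed by the makeMicroVM wrapper that the machines/configuration.nix diff above removes in the same changeset; every microvm enabled it like this (values copied from that diff):

``` nix
malobeo.metrics = {
  enable = true;           # node exporter on TCP 9002
  enablePromtail = true;   # ship the systemd journal to loki
  logNginx = false;        # per-host opt-in for nginx access logs
  lokiHost = "10.0.0.14";  # overwatch, which this changeset also removes
};
```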

File: machines/modules/malobeo/microvm_host.nix (inferred)

@@ -86,12 +86,6 @@ in
       in
         builtins.listToAttrs (map mapperFunc cfg.deployHosts);
-    systemd.tmpfiles.rules = builtins.concatLists (map (name: [
-      "d /var/lib/microvms/${name}/var 0755 root root - -"
-      "d /var/lib/microvms/${name}/etc 0755 root root - -"
-      "d /${name} 0755 root root - -"
-    ]) cfg.deployHosts);
    systemd.services = builtins.foldl' (services: name: services // {
      "microvm-update@${name}" = {
        description = "Update MicroVMs automatically";

File: machines/modules/malobeo/promtail_config.nix (deleted; inferred from the metrics module's import)

@@ -1,49 +0,0 @@
-{ logNginx, lokiAddress, config, pkgs, ... }:
-let
-  basecfg = ''
-    server:
-      http_listen_port: 9080
-      grpc_listen_port: 0
-    positions:
-      filename: /tmp/positions.yaml
-    clients:
-      - url: http://${lokiAddress}:3100/loki/api/v1/push
-  '';
-  withNginx = ''
-    scrape_configs:
-      - job_name: journal
-        journal:
-          max_age: 12h
-          labels:
-            job: systemd-journal
-            host: ${config.networking.hostName}
-        relabel_configs:
-          - source_labels: ["__journal__systemd_unit"]
-            target_label: "unit"
-      - job_name: nginx
-        static_configs:
-          - targets:
-              - localhost
-            labels:
-              job: nginx
-              __path__: /var/log/nginx/*log
-  '';
-  withoutNginx = ''
-    scrape_configs:
-      - job_name: journal
-        journal:
-          max_age: 12h
-          labels:
-            job: systemd-journal
-            host: ${config.networking.hostName}
-        relabel_configs:
-          - source_labels: ["__journal__systemd_unit"]
-            target_label: "unit"
-  '';
-in
-pkgs.writeText "promtailcfg.yaml" (if logNginx then ''${basecfg}${withNginx}'' else ''${basecfg}${withoutNginx}'')

File: machines/overwatch/configuration.nix (deleted)

@@ -1,115 +0,0 @@
-{ config, lib, pkgs, inputs, ... }:
-with lib;
-{
-  networking = {
-    hostName = mkDefault "overwatch";
-    useDHCP = false;
-  };
-  imports = [
-    ../modules/malobeo_user.nix
-    ../modules/sshd.nix
-  ];
-  networking.firewall.allowedTCPPorts = [ 80 9080 9001 3100 ];
-  services.grafana = {
-    enable = true;
-    domain = "grafana.malobeo.org";
-    port = 2342;
-    addr = "127.0.0.1";
-    provision.datasources.settings = {
-      apiVersion = 1;
-      datasources = [
-        {
-          name = "loki";
-          type = "loki";
-          access = "proxy";
-          uid = "eeakiack8nqwwc";
-          url = "http://localhost:3100";
-          editable = false;
-        }
-        {
-          name = "prometheus";
-          type = "prometheus";
-          access = "proxy";
-          uid = "feakib1gq7ugwc";
-          url = "http://localhost:9001";
-          editable = false;
-        }
-      ];
-    };
-    provision.dashboards.settings = {
-      apiVersion = 1;
-      providers = [{
-        name = "default";
-        options.path = ./dashboards;
-      }];
-    };
-  };
-  services.nginx = {
-    enable = true;
-    virtualHosts.${config.services.grafana.domain} = {
-      locations."/" = {
-        proxyPass = "http://127.0.0.1:${toString config.services.grafana.port}";
-        proxyWebsockets = true;
-        extraConfig = ''
-          proxy_set_header Host $host;
-        '';
-      };
-    };
-  };
-  services.prometheus = {
-    enable = true;
-    port = 9001;
-    scrapeConfigs = [
-      {
-        job_name = "overwatch";
-        static_configs = [{
-          targets = [ "127.0.0.1:9002" ];
-        }];
-      }
-      {
-        job_name = "durruti";
-        static_configs = [{
-          targets = [ "10.0.0.5:9002" ];
-        }];
-      }
-      {
-        job_name = "infradocs";
-        static_configs = [{
-          targets = [ "10.0.0.11:9002" ];
-        }];
-      }
-      {
-        job_name = "nextcloud";
-        static_configs = [{
-          targets = [ "10.0.0.13:9002" ];
-        }];
-      }
-      # add vpn - check how to reach it first. most probably 10.100.0.1
-    ];
-  };
-  services.loki = {
-    enable = true;
-    configFile = ./loki.yaml;
-  };
-  users.users.promtail.extraGroups = [ "nginx" "systemd-journal" ];
-  system.stateVersion = "22.11"; # Did you read the comment?
-}

File diff suppressed because it is too large

File diff suppressed because it is too large

File: machines/overwatch/loki.yaml (deleted; inferred from configFile = ./loki.yaml above)

@@ -1,60 +0,0 @@
-auth_enabled: false
-
-server:
-  http_listen_port: 3100
-  grpc_listen_port: 9096
-  log_level: debug
-  grpc_server_max_concurrent_streams: 1000
-
-common:
-  instance_addr: 127.0.0.1
-  path_prefix: /tmp/loki
-  storage:
-    filesystem:
-      chunks_directory: /tmp/loki/chunks
-      rules_directory: /tmp/loki/rules
-  replication_factor: 1
-  ring:
-    kvstore:
-      store: inmemory
-
-query_range:
-  results_cache:
-    cache:
-      embedded_cache:
-        enabled: true
-        max_size_mb: 100
-
-schema_config:
-  configs:
-    - from: 2020-10-24
-      store: tsdb
-      object_store: filesystem
-      schema: v13
-      index:
-        prefix: index_
-        period: 24h
-
-pattern_ingester:
-  enabled: true
-  metric_aggregation:
-    loki_address: localhost:3100
-
-ruler:
-  alertmanager_url: http://localhost:9093
-
-frontend:
-  encoding: protobuf
-
-# By default, Loki will send anonymous, but uniquely-identifiable usage and configuration
-# analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/
-#
-# Statistics help us better understand how Loki is used, and they show us performance
-# levels for most users. This helps us prioritize features and documentation.
-# For more information on what's sent, look at
-# https://github.com/grafana/loki/blob/main/pkg/analytics/stats.go
-# Refer to the buildReport method to see what goes into a report.
-#
-# If you would like to disable reporting, uncomment the following lines:
-analytics:
-  reporting_enabled: false

File: static promtail config for overwatch (deleted; exact path not shown)

@@ -1,29 +0,0 @@
-server:
-  http_listen_port: 9080
-  grpc_listen_port: 0
-
-positions:
-  filename: /tmp/positions.yaml
-
-clients:
-  - url: http://10.0.0.13:3100/loki/api/v1/push
-
-scrape_configs:
-  - job_name: journal
-    journal:
-      max_age: 12h
-      labels:
-        job: systemd-journal
-        host: overwatch
-    relabel_configs:
-      - source_labels: ["__journal__systemd_unit"]
-        target_label: "unit"
-  - job_name: nginx
-    static_configs:
-      - targets:
-          - localhost
-        labels:
-          job: nginx
-          __path__: /var/log/nginx/*log

File: machines/uptimekuma/configuration.nix (inferred)

@@ -6,6 +6,7 @@ with lib;
   networking = {
     hostName = mkDefault "uptimekuma";
     useDHCP = false;
+    nameservers = [ "1.1.1.1" ];
   };
   imports = [

File: machines/<host>/configuration.nix (host name not shown)

@@ -17,7 +17,6 @@ with lib;
   };
   imports = [
-    inputs.self.nixosModules.malobeo.vpn
     ../modules/malobeo_user.nix
     ../modules/sshd.nix
    ../modules/minimal_tools.nix

File: flake.nix

@@ -12,241 +12,8 @@
   let filter_system = name: if name == utils.lib.system.i686-linux then false else true;
   in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems) ( system:
     let
-      baseModules = [
-        # make flake inputs accessible in NixOS
-        { _module.args.inputs = inputs; }
-        {
-          imports = [
-            ({ pkgs, ... }: {
-              nix = {
-                extraOptions = ''
-                  experimental-features = nix-command flakes
-                '';
-                settings = {
-                  substituters = [
-                    "https://cache.dynamicdiscord.de"
-                    "https://cache.nixos.org/"
-                  ];
-                  trusted-public-keys = [
-                    "cache.dynamicdiscord.de:DKueZicqi2NhJJXz9MYgUbiyobMs10fTyHCgAUibRP4="
-                  ];
-                  trusted-users = [ "root" "@wheel" ];
-                };
-              };
-            })
-            sops-nix.nixosModules.sops
-            #microvm.nixosModules.microvm
-          ];
-        }
-      ];
-      defaultModules = baseModules;
-      makeMicroVM = hostName: ipv4Addr: macAddr: modules: [
-        self.nixosModules.malobeo.metrics
-        {
-          microvm = {
-            hypervisor = "cloud-hypervisor";
-            mem = 2560;
-            shares = [
-              {
-                source = "/nix/store";
-                mountPoint = "/nix/.ro-store";
-                tag = "store";
-                proto = "virtiofs";
-                socket = "store.socket";
-              }
-              {
-                source = "/var/lib/microvms/${hostName}/etc";
-                mountPoint = "/etc";
-                tag = "etc";
-                proto = "virtiofs";
-                socket = "etc.socket";
-              }
-              {
-                source = "/var/lib/microvms/${hostName}/var";
-                mountPoint = "/var";
-                tag = "var";
-                proto = "virtiofs";
-                socket = "var.socket";
-              }
-            ];
-            interfaces = [
-              {
-                type = "tap";
-                id = "vm-${hostName}";
-                mac = "${macAddr}";
-              }
-            ];
-          };
-          malobeo.metrics = {
-            enable = true;
-            enablePromtail = true;
-            logNginx = false;
-            lokiHost = "10.0.0.14";
-          };
-          systemd.network.enable = true;
-          systemd.network.networks."20-lan" = {
-            matchConfig.Type = "ether";
-            networkConfig = {
-              Address = [ "${ipv4Addr}/24" ];
-              Gateway = "10.0.0.1";
-              DNS = ["1.1.1.1"];
-              DHCP = "no";
-            };
-          };
-        }
-      ] ++ defaultModules ++ modules;
       pkgs-unstable = nixpkgs-unstable.legacyPackages."${system}";
       pkgs = nixpkgs.legacyPackages."${system}";
-      vmMicroVMOverwrites = hostname: options: {
-        microvm = {
-          mem = pkgs.lib.mkForce 4096;
-          hypervisor = pkgs.lib.mkForce "qemu";
-          socket = pkgs.lib.mkForce null;
-          # needed for hosts that deploy imperative microvms (for example fanny)
-          writableStoreOverlay = pkgs.lib.mkIf options.writableStore "/nix/.rw-store";
-          volumes = pkgs.lib.mkIf options.writableStore [ {
-            image = "nix-store-overlay.img";
-            mountPoint = self.nixosConfigurations.${hostname}.config.microvm.writableStoreOverlay;
-            size = 2048;
-          } ];
-          shares = pkgs.lib.mkForce (pkgs.lib.optionals (!options.writableStore) [
-            {
-              tag = "ro-store";
-              source = "/nix/store";
-              mountPoint = "/nix/.ro-store";
-            }
-          ] ++ pkgs.lib.optionals (options.varPath != "") [
-            {
-              source = "${options.varPath}";
-              securityModel = "mapped";
-              mountPoint = "/var";
-              tag = "var";
-            }
-          ]);
-          interfaces = pkgs.lib.mkIf (!options.withNetworking) (pkgs.lib.mkForce [{
-            type = "user";
-            id = "eth0";
-            mac = "02:23:de:ad:be:ef";
-          }]);
-          # if networking is disabled forward port 80 to still have access to webservices
-          forwardPorts = pkgs.lib.mkIf (!options.withNetworking) (pkgs.lib.mkForce [
-            { from = "host"; host.port = 8080; guest.port = 80; }
-          ]);
-        };
-        fileSystems = {
-          "/".fsType = pkgs.lib.mkForce "tmpfs";
-          # prometheus uses a memory mapped file which doesn't seem supported by 9p shares
-          # therefore we mount a tmpfs inside the datadir
-          "/var/lib/prometheus2/data" = pkgs.lib.mkIf (hostname == "overwatch" && options.varPath != "") (pkgs.lib.mkForce {
-            fsType = pkgs.lib.mkForce "tmpfs";
-          });
-        };
-        boot.isContainer = pkgs.lib.mkForce false;
-        services.timesyncd.enable = false;
-        users.users.root.password = "";
-        services.getty.helpLine = ''
-          Log in as "root" with an empty password.
-          Use "reboot" to shut qemu down.
-        '';
-      };
-      vmDiskoOverwrites = {
-        boot.initrd = {
-          secrets = pkgs.lib.mkForce {};
-          network.ssh.enable = pkgs.lib.mkForce false;
-        };
-        malobeo.disks.enable = pkgs.lib.mkForce false;
-        networking.hostId = "a3c3101f";
-      };
-      vmSopsOverwrites = host: {
-        sops.defaultSopsFile = pkgs.lib.mkForce ./machines/${host}/dummy.yaml;
-        environment.etc = {
-          devHostKey = {
-            source = ./machines/secrets/devkey_ed25519;
-            mode = "0600";
-          };
-        };
-        services.openssh.hostKeys = [{
-          path = "/etc/devHostKey";
-          type = "ed25519";
-        }];
-      };
-      buildVM = host: networking: sopsDummy: disableDisko: varPath: writableStore: (self.nixosConfigurations.${host}.extendModules {
-        modules = [
-          (vmMicroVMOverwrites host {
-            withNetworking = networking;
-            varPath = "${varPath}";
-            writableStore = writableStore; })
-          (if sopsDummy then (vmSopsOverwrites host) else {})
-          (if disableDisko then vmDiskoOverwrites else {})
-        ] ++ pkgs.lib.optionals (! self.nixosConfigurations.${host}.config ? microvm) [
-          #microvm.nixosModules.microvm
-        ] ++ pkgs.lib.optionals (self.nixosConfigurations.${host}.config ? services.malobeo.microvm.deployHosts) [
-          #microvm.nixosModules.host
-          {
-            services.malobeo.microvm.deployHosts = pkgs.lib.mkForce [];
-            systemd.tmpfiles.rules = builtins.concatLists (map (name: [
-              "q /var/lib/microvms/${name}/var 0755 root root - -"
-              "q /var/lib/microvms/${name}/etc 0755 root root - -"
-              "q /var/${name}/wow/it/works 0755 root root - -"
-              "q /var/lib/${name} 0755 root root - -"
-              "d /${name} 0755 root root - -"
-            ]) self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts);
-            microvm.vms =
-              let
-                # Map the values to each hostname to then generate an Attrset using listToAttrs
-                mapperFunc = name: { inherit name; value = {
-                  #pkgs = import self.nixosConfigurations.${name}.config.nixpkgs;
-                  #pkgs = (buildVM name networking sopsDummy false "" false).config.nixpkgs;
-                  #config = (buildVM name networking sopsDummy false "" false);
-                  #pkgs = pkgs;
-                  #config = self.nixosConfigurations.${name};
-                  specialArgs.inputs = inputs;
-                  specialArgs.self = self;
-                  config = {
-                    imports = (makeMicroVM "${name}" "10.0.0.11" "D0:E5:CA:F0:D7:E7" [
-                      #(vmMicroVMOverwrites name {
-                      #  withNetworking = true;
-                      #  varPath = "";
-                      #  writableStore = false; })
-                      (if sopsDummy then (vmSopsOverwrites name) else {})
-                    ]);
-                  };
-                }; };
-              in
-                builtins.listToAttrs (map mapperFunc self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts);
-          }];
-      });
     in
     {
       devShells.default =
@@ -271,19 +38,14 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
           pkgs.mdbook
           microvmpkg.microvm
         ];
         packages = builtins.map (pkgName: self.legacyPackages."${pkgs.system}".scripts.${pkgName}) installed;
         shellHook = ''echo "Available scripts: ${builtins.concatStringsSep " " installed}"'';
       };
       legacyPackages = {
         scripts.remote-install = pkgs.writeShellScriptBin "remote-install" (builtins.readFile ./scripts/remote-install-encrypt.sh);
         scripts.boot-unlock = pkgs.writeShellScriptBin "boot-unlock" (builtins.readFile ./scripts/unlock-boot.sh);
-        scripts.run-vm = self.packages.${system}.run-vm;
       };
-      vmBuilder = buildVM;
       packages = {
         docs = pkgs.stdenv.mkDerivation {
           name = "malobeo-docs";
@@ -301,91 +63,102 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
            cp -r ./book/* $dest
          '';
        };
-        run-vm = pkgs.writeShellScriptBin "run-vm" ''
-          usage() {
-            echo "Usage: run-vm <hostname> [--networking] [--dummy-secrets] [--no-disko]"
-            echo "ATTENTION: This script must be run from the flakes root directory"
-            echo "--networking setup interfaces. requires root and hostbridge enabled on the host"
-            echo "--dummy-secrets run vm with dummy sops secrets"
-            echo "--no-disko disable disko and initrd secrets. needed for real hosts like fanny"
-            echo "--writable-store enables writable store. necessary for host with nested imperative microvms like fanny"
-            echo "--var path to directory that should be shared as /var. may require root otherwise some systemd units fail within vm. if dir is empty vm will populate"
-            exit 1
-          }
-
-          # check at least one arg was given
-          if [ "$#" -lt 1 ]; then
-            usage
-          fi
-
-          HOSTNAME=$1
-
-          # Optional arguments
-          NETWORK=false
-          DUMMY_SECRETS=false
-          NO_DISKO=false
-          RW_STORE=false
-          VAR_PATH=""
-
-          # check args
-          shift
-          while [[ "$#" -gt 0 ]]; do
-            case $1 in
-              --networking) NETWORK=true ;;
-              --dummy-secrets) DUMMY_SECRETS=true ;;
-              --no-disko) NO_DISKO=true ;;
-              --writable-store) RW_STORE=true ;;
-              --var)
-                if [[ -n "$2" && ! "$2" =~ ^- ]]; then
-                  VAR_PATH="$2"
-                  shift
-                else
-                  echo "Error: --var requires a non-empty string argument."
-                  usage
-                fi
-                ;;
-              *) echo "Unknown argument: $1"; usage ;;
-            esac
-            shift
-          done
-          echo "starting host $HOSTNAME"
-          echo "enable networking: $NETWORK"
-          echo "deploy dummy secrets: $DUMMY_SECRETS"
-          echo "disable disko and initrd secrets: $NO_DISKO"
-          echo "use writable store: $RW_STORE"
-          if [ -n "$VAR_PATH" ]; then
-            echo "sharing var directory: $VAR_PATH"
-          fi
-          ${pkgs.nix}/bin/nix run --show-trace --impure --expr "((builtins.getFlake \"$(pwd)\").vmBuilder.x86_64-linux \"$HOSTNAME\" $NETWORK $DUMMY_SECRETS $NO_DISKO \"$VAR_PATH\" $RW_STORE).config.microvm.declaredRunner"
-        '';
-      };
+      } //
+      builtins.foldl'
+        (result: host:
+          let
+            inherit (self.nixosConfigurations.${host}) config;
+          in
+          result // {
+            # boot any machine in a microvm
+            "${host}-vm" = (self.nixosConfigurations.${host}.extendModules {
+              modules = [{
+                microvm = {
+                  mem = pkgs.lib.mkForce 4096;
+                  hypervisor = pkgs.lib.mkForce "qemu";
+                  socket = pkgs.lib.mkForce null;
+                  shares = pkgs.lib.mkForce [{
+                    tag = "ro-store";
+                    source = "/nix/store";
+                    mountPoint = "/nix/.ro-store";
+                  }];
+                };
+                boot.isContainer = pkgs.lib.mkForce false;
+                users.users.root.password = "";
+                fileSystems."/".fsType = pkgs.lib.mkForce "tmpfs";
+                services.getty.helpLine = ''
+                  Log in as "root" with an empty password.
+                  Use "reboot" to shut qemu down.
+                '';
+              }] ++ pkgs.lib.optionals (! config ? microvm) [
+                microvm.nixosModules.microvm
+              ];
+            }).config.microvm.declaredRunner;
+          })
+        { }
+        (builtins.attrNames self.nixosConfigurations) //
+      builtins.foldl'
+        (result: host:
+          let
+            inherit (self.nixosConfigurations.${host}) config;
+          in
+          result // {
+            # boot any machine in a microvm
+            "${host}-vm-withsops" = (self.nixosConfigurations.${host}.extendModules {
+              modules = [{
+                sops.defaultSopsFile = pkgs.lib.mkForce ./machines/${host}/dummy.yaml;
+                environment.etc = {
+                  devHostKey = {
+                    source = ./machines/secrets/devkey_ed25519;
+                    mode = "0600";
+                  };
+                };
+                services.openssh.hostKeys = [{
+                  path = "/etc/devHostKey";
+                  type = "ed25519";
+                }];
+
+                microvm = {
+                  mem = pkgs.lib.mkForce 4096;
+                  hypervisor = pkgs.lib.mkForce "qemu";
+                  socket = pkgs.lib.mkForce null;
+                  shares = pkgs.lib.mkForce [
+                    {
+                      tag = "ro-store";
+                      source = "/nix/store";
+                      mountPoint = "/nix/.ro-store";
+                    }
+                  ];
+                };
+                boot.isContainer = pkgs.lib.mkForce false;
+                users.users.root.password = "";
+                fileSystems."/".fsType = pkgs.lib.mkForce "tmpfs";
+                services.getty.helpLine = ''
+                  Log in as "root" with an empty password.
+                  Use "reboot" to shut qemu down.
+                '';
+              }] ++ pkgs.lib.optionals (! config ? microvm) [
+                microvm.nixosModules.microvm
+              ];
+            }).config.microvm.declaredRunner;
+          })
+        { }
+        (builtins.attrNames self.nixosConfigurations);
       apps = {
         docs = {
           type = "app";
           program = builtins.toString (pkgs.writeShellScript "docs" ''
-            ${pkgs.xdg-utils}/bin/xdg-open "${self.packages.${system}.docs}/share/doc/index.html"
-          '');
-        };
-        docsDev = {
-          type = "app";
-          program = builtins.toString (pkgs.writeShellScript "docs" ''
-            echo "needs to run from infrastructure root folder"
             ${pkgs.mdbook}/bin/mdbook serve --open ./doc
           '');
         };
-        run-vm = {
-          type = "app";
-          program = "${self.packages.${system}.run-vm}/bin/run-vm";
-        };
       };
-  })) // {
+  })) // rec {
     nixosConfigurations = import ./machines/configuration.nix (inputs // {
       inherit inputs;
      self = self;
@@ -396,7 +169,6 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
       microvm.imports = [ ./machines/modules/malobeo/microvm_host.nix ];
       vpn.imports = [ ./machines/modules/malobeo/wireguard.nix ];
       initssh.imports = [ ./machines/modules/malobeo/initssh.nix ];
-      metrics.imports = [ ./machines/modules/malobeo/metrics.nix ];
       disko.imports = [ ./machines/modules/disko ];
     };
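
Net effect of the flake.nix rewrite: the imperative `run-vm` wrapper and the `vmBuilder` output are gone, and every entry in `nixosConfigurations` now gets two declarative runners, `<host>-vm` and `<host>-vm-withsops`. A usage sketch under that assumption (host names taken from this changeset; the grep is just one way to list the outputs):

``` bash
# List the generated VM runners, one pair per nixosConfiguration:
nix flake show 2>/dev/null | grep -E -- '-vm(-withsops)?'

# Boot the nextcloud configuration as a throwaway qemu microvm with dummy
# secrets; log in as "root" with an empty password, "reboot" shuts qemu down:
nix run .\#nextcloud-vm-withsops
```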