46 Commits

Author SHA1 Message Date
af11810935 wip
All checks were successful
Check flake syntax / flake-check (push) Successful in 4m5s
2025-01-21 22:17:16 +01:00
81f73a0704 [overwatch] grafana provision datasource and dashboards
All checks were successful
Check flake syntax / flake-check (push) Successful in 4m3s
2025-01-20 22:31:36 +01:00
7730163b84 [metrics] enable on all microvms
All checks were successful
Check flake syntax / flake-check (push) Successful in 4m9s
2025-01-20 21:32:07 +01:00
725efc3e0a [metrics] init module 2025-01-20 21:32:07 +01:00
724f14cfcd [overwatch] backup dashboard 2025-01-20 21:32:07 +01:00
cf3bfa1784 [infradocs] fix loki addr 2025-01-20 21:32:07 +01:00
c71d7959f2 [run-vm] handle edgecase for prometheus mmaped file on 9p share 2025-01-20 21:31:43 +01:00
155e78b519 [run-vm] use securityModel mapped to allow mounting /var 9p share 2025-01-20 21:31:43 +01:00
c54f04cb62 [nix] change .#docs to just start browser and use .#docsDev for local development 2025-01-20 21:31:43 +01:00
197a8427b7 [docs] add run-vm examples 2025-01-20 21:31:43 +01:00
1877f6dc9c [run-vm] allow sharing of /var/lib
sharing /var somehow doesn't work; for example, nginx fails because it
lacks permission to access /var/log/nginx. This also happens when
run-vm is started as root. That's why only /var/lib is shared, which still
allows application persistence between tests
2025-01-20 21:31:43 +01:00
c47e93170f [run-vm] add flag to disable disko
needed to run fanny as vm
2025-01-20 21:31:43 +01:00
eea5d6924d [nix] add run-vm script 2025-01-20 21:31:43 +01:00
468106677c [vpn] update dummy secrets 2025-01-20 21:31:43 +01:00
df4d769f82 [lucia] init dummy secrets 2025-01-20 21:31:43 +01:00
249eebf164 [fanny] init dummy secrets 2025-01-20 21:31:43 +01:00
e3bcd250e0 [durruti] rm secrets.yaml - currently empty 2025-01-20 21:31:43 +01:00
1b871f3860 [nextcloud] add dummy secrets 2025-01-20 21:31:43 +01:00
3564436dfe [sops] add a dummy key to allow secret usage within test vms 2025-01-20 21:31:43 +01:00
4744324385 [sops] test sharing hostkey with vm 2025-01-20 21:31:43 +01:00
88ef307a65 [fanny] proxypass cloud.malobeo.org 2025-01-20 21:31:43 +01:00
dec968a4db [fanny] deploy nextcloud 2025-01-20 21:31:43 +01:00
283dc51d67 [nextcloud] rm obsolete nameserver 2025-01-20 21:31:43 +01:00
ahtlon
f4a6c40cd2 [nextcloud] nextcloud works now 2025-01-20 21:31:43 +01:00
ahtlon
23caa27d4e [nextcloud] flake update because for some reason the sha changed 2025-01-20 21:31:43 +01:00
ahtlon
d6aee8657b [nextcloud] enable postgres, redis, change domain 2025-01-20 21:31:43 +01:00
ahtlon
e7e05327e4 [nextcloud] rm discourse 2025-01-20 21:31:40 +01:00
1fc3538e03 [nextcloud] fix hostname 2025-01-20 21:30:48 +01:00
ahtlon
1ebee6d886 add nextcloud collectives 2025-01-20 21:30:48 +01:00
ahtlon
94e439bf0b login geht 2025-01-20 21:30:48 +01:00
ahtlon
e50f3349ba nextcloud minimal 2025-01-20 21:30:48 +01:00
ahtlon
18b747a7df sops.... 2025-01-20 21:30:48 +01:00
ahtlon
ea6e019b64 Start over but right this time 2025-01-20 21:30:48 +01:00
ahtlon
8581f762a2 basic discourse example 2025-01-20 21:30:48 +01:00
ahtlon
b223f0cb0c with nix flake check the hydraJobs output is evaluated in the same way as Hydra's hydra-eval-jobs 2025-01-20 21:30:48 +01:00
ahtlon
9ba607ce16 Only run on push 2025-01-20 21:30:48 +01:00
ahtlon
34c2661c53 [actions] Add flake check 2025-01-20 21:30:48 +01:00
ahtlon
211799b6b9 [modules] move microvm module import from makeMicroVM to baseModules 2025-01-20 21:30:48 +01:00
ahtlon
543c4ed49e [scripts] move packages to legacyPackages 2025-01-20 21:30:48 +01:00
ahtlon
27085dd3e6 [scripts] check for flake.nix 2025-01-20 21:30:48 +01:00
ahtlon
8076956982 [packages] make scripts available in shell without nix run 2025-01-20 21:30:48 +01:00
26829f9255 [infradocs] provide stats
All checks were successful
Evaluate Hydra Jobs / eval-hydra-jobs (push) Successful in 4m55s
Evaluate Hydra Jobs / eval-hydra-jobs (pull_request) Successful in 5m57s
2025-01-18 20:01:27 +01:00
0d93cad9d4 [overwatch] init
All checks were successful
Evaluate Hydra Jobs / eval-hydra-jobs (push) Successful in 4m19s
2025-01-18 11:40:27 +01:00
02a57d98d0 [microvms] rm nameserver option 2025-01-18 11:39:36 +01:00
4553c2c069 [nix] do not rm network interfaces of vms 2025-01-17 16:19:19 +01:00
3f3dca3c7f [docs] make readme the index
still most of it is quite out of date...
2025-01-17 14:30:49 +01:00
15 changed files with 25547 additions and 20 deletions

View File

@@ -43,6 +43,7 @@ let
defaultModules = baseModules;
makeMicroVM = hostName: ipv4Addr: macAddr: modules: [
self.nixosModules.malobeo.metrics
{
microvm = {
hypervisor = "cloud-hypervisor";
@@ -79,6 +80,13 @@ let
}
];
};
malobeo.metrics = {
enable = true;
enablePromtail = true;
logNginx = false;
lokiHost = "10.0.0.14";
};
systemd.network.enable = true;
@@ -179,11 +187,19 @@ in
];
};
overwatch = nixosSystem {
system = "x86_64-linux";
specialArgs.inputs = inputs;
specialArgs.self = self;
modules = makeMicroVM "overwatch" "10.0.0.14" "D0:E5:CA:F0:D7:E0" [
./overwatch/configuration.nix
];
};
testvm = nixosSystem {
system = "x86_64-linux";
specialArgs.inputs = inputs;
specialArgs.self = self;
modules = defaultModules ++ [ ./testvm ];
};
}

View File

@@ -6,7 +6,6 @@ with lib;
networking = {
hostName = mkDefault "durruti";
useDHCP = false;
nameservers = [ "1.1.1.1" ];
};
networking.firewall.allowedTCPPorts = [ 8080 ];

View File

@@ -53,7 +53,7 @@ in
};
services.malobeo.microvm.enableHostBridge = true;
services.malobeo.microvm.deployHosts = [ "infradocs" "nextcloud" ];
services.malobeo.microvm.deployHosts = [ "infradocs" "nextcloud" "overwatch" ];
networking = {
firewall = {

View File

@@ -6,7 +6,6 @@ with lib;
networking = {
hostName = mkDefault "infradocs";
useDHCP = false;
nameservers = [ "1.1.1.1" ];
};
imports = [
@@ -15,6 +14,12 @@ with lib;
../modules/sshd.nix
];
networking.firewall.allowedTCPPorts = [ 9002 ];
malobeo.metrics.logNginx = lib.mkForce true;
users.users.promtail.extraGroups = [ "nginx" "systemd-journal" ];
system.stateVersion = "22.11"; # Did you read the comment?
}

View File

@@ -0,0 +1,56 @@
# NixOS module `malobeo.metrics`: opt-in metrics and log shipping for a host.
# When enabled it starts the Prometheus node exporter on port 9002 and,
# optionally, promtail — which pushes the systemd journal (and, optionally,
# nginx logs) to a Loki instance.
{ config, lib, pkgs, ... }:
let
  cfg = config.malobeo.metrics;
in
{
  options.malobeo.metrics = {
    # Master switch; everything in `config` below is a no-op unless true.
    enable = lib.mkOption {
      type = lib.types.bool;
      default = false;
      description = "Enable sharing metrics";
    };
    # Log shipping can be turned off independently of metrics.
    enablePromtail = lib.mkOption {
      type = lib.types.bool;
      default = true;
      description = "Enable sharing logs";
    };
    # Adds an nginx file-scrape job to promtail and puts the promtail user
    # in the nginx group so it can read /var/log/nginx.
    logNginx = lib.mkOption {
      type = lib.types.bool;
      default = false;
      description = "Share nginx logs";
    };
    # Where promtail pushes logs (Loki push API assumed on port 3100).
    # NOTE(review): default matches the overwatch VM's address — confirm it
    # stays in sync with the network layout.
    lokiHost = lib.mkOption {
      type = lib.types.str;
      default = "10.0.0.14";
      description = "Address of loki host";
    };
  };
  config = lib.mkIf (cfg.enable) {
    # Allow remote Prometheus scrapes of the node exporter.
    networking.firewall.allowedTCPPorts = [ 9002 ];
    services.prometheus = {
      exporters = {
        node = {
          enable = true;
          enabledCollectors = [ "systemd" "processes" ];
          port = 9002;
        };
      };
    };
    # promtail configuration is generated by ./promtail_config.nix from the
    # options above.
    services.promtail = {
      enable = cfg.enablePromtail;
      configFile = import ./promtail_config.nix {
        lokiAddress = cfg.lokiHost;
        logNginx = cfg.logNginx;
        config = config;
        pkgs = pkgs;
      };
    };
    # systemd-journal group grants journal read access; the nginx group is
    # added only when nginx log scraping is requested.
    users.users.promtail.extraGroups = [ "systemd-journal" ] ++ (lib.optionals cfg.logNginx [ "nginx" ]);
  };
}

View File

@@ -86,6 +86,12 @@ in
in
builtins.listToAttrs (map mapperFunc cfg.deployHosts);
systemd.tmpfiles.rules = builtins.concatLists (map (name: [
"d /var/lib/microvms/${name}/var 0755 root root - -"
"d /var/lib/microvms/${name}/etc 0755 root root - -"
"d /${name} 0755 root root - -"
]) cfg.deployHosts);
systemd.services = builtins.foldl' (services: name: services // {
"microvm-update@${name}" = {
description = "Update MicroVMs automatically";

View File

@@ -0,0 +1,49 @@
# Generates a promtail configuration file (YAML) that ships logs to Loki.
#
# Arguments:
#   logNginx    - when true, additionally scrape /var/log/nginx/*log
#   lokiAddress - host running Loki (push API expected on port 3100)
#   config      - NixOS configuration; used for the journal "host" label
#   pkgs        - nixpkgs; used for writeText
#
# Returns: a store path to the rendered promtailcfg.yaml.
{ logNginx, lokiAddress, config, pkgs, ... }:
let
  # Server/positions/clients preamble shared by all variants.
  basecfg = ''
    server:
      http_listen_port: 9080
      grpc_listen_port: 0
    positions:
      filename: /tmp/positions.yaml
    clients:
      - url: http://${lokiAddress}:3100/loki/api/v1/push
  '';
  # systemd-journal scrape job — always present. Kept as a single definition
  # so the nginx and non-nginx variants cannot drift apart.
  # (YAML allows sequence items at the same indent as their parent key,
  # which lets the optional nginx job be appended as a sibling list item.)
  journalCfg = ''
    scrape_configs:
    - job_name: journal
      journal:
        max_age: 12h
        labels:
          job: systemd-journal
          host: ${config.networking.hostName}
      relabel_configs:
        - source_labels: ["__journal__systemd_unit"]
          target_label: "unit"
  '';
  # Optional nginx access/error log scrape job, appended when logNginx is set.
  nginxCfg = ''
    - job_name: nginx
      static_configs:
        - targets:
            - localhost
          labels:
            job: nginx
            __path__: /var/log/nginx/*log
  '';
in
pkgs.writeText "promtailcfg.yaml"
  (basecfg + journalCfg + (if logNginx then nginxCfg else ""))

View File

@@ -0,0 +1,115 @@
# Monitoring VM "overwatch": runs Grafana (behind nginx), Prometheus and
# Loki; Prometheus scrapes the node exporters of the other microvms on
# port 9002 (see the malobeo.metrics module).
{ config, lib, pkgs, inputs, ... }:
with lib;
{
  networking = {
    hostName = mkDefault "overwatch";
    useDHCP = false;
  };
  imports = [
    ../modules/malobeo_user.nix
    ../modules/sshd.nix
  ];
  # 80 = nginx (grafana proxy), 9080 = promtail, 9001 = prometheus, 3100 = loki
  networking.firewall.allowedTCPPorts = [ 80 9080 9001 3100 ];
  services.grafana = {
    enable = true;
    domain = "grafana.malobeo.org";
    # NOTE(review): the top-level `port`/`addr`/`domain` options are
    # deprecated in newer nixpkgs in favor of settings.server.* — confirm
    # against the pinned nixpkgs version.
    port = 2342;
    addr = "127.0.0.1";
    # Declaratively provision the loki and prometheus datasources so no
    # manual setup is needed after (re)deploys.
    provision.datasources.settings = {
      apiVersion = 1;
      datasources = [
        {
          name = "loki";
          type = "loki";
          access = "proxy";
          uid = "eeakiack8nqwwc";
          url = "http://localhost:3100";
          editable = false;
        }
        {
          name = "prometheus";
          type = "prometheus";
          access = "proxy";
          uid = "feakib1gq7ugwc";
          url = "http://localhost:9001";
          editable = false;
        }
      ];
    };
    # Dashboards are loaded from the JSON files in ./dashboards.
    provision.dashboards.settings = {
      apiVersion = 1;
      providers = [{
        name = "default";
        options.path = ./dashboards;
      }];
    };
  };
  # nginx terminates HTTP on the grafana domain and proxies to grafana
  # listening on localhost only.
  services.nginx = {
    enable = true;
    virtualHosts.${config.services.grafana.domain} = {
      locations."/" = {
        proxyPass = "http://127.0.0.1:${toString config.services.grafana.port}";
        proxyWebsockets = true;
        extraConfig = ''
          proxy_set_header Host $host;
        '';
      };
    };
  };
  # One scrape job per host; all targets expose the node exporter on 9002.
  services.prometheus = {
    enable = true;
    port = 9001;
    scrapeConfigs = [
      {
        job_name = "overwatch";
        static_configs = [{
          targets = [ "127.0.0.1:9002" ];
        }];
      }
      {
        job_name = "durruti";
        static_configs = [{
          targets = [ "10.0.0.5:9002" ];
        }];
      }
      {
        job_name = "infradocs";
        static_configs = [{
          targets = [ "10.0.0.11:9002" ];
        }];
      }
      {
        job_name = "nextcloud";
        static_configs = [{
          targets = [ "10.0.0.13:9002" ];
        }];
      }
      # add vpn - check how to reach it first. most probably 10.100.0.1
    ];
  };
  # Loki configuration is kept as a plain YAML file next to this module.
  services.loki = {
    enable = true;
    configFile = ./loki.yaml;
  };
  # promtail needs journal read access; nginx group grants access to
  # /var/log/nginx for the nginx scrape job.
  users.users.promtail.extraGroups = [ "nginx" "systemd-journal" ];
  system.stateVersion = "22.11"; # Did you read the comment?
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,60 @@
# Single-node Loki configuration for the overwatch monitoring VM.
# Single-process deployment: in-memory ring, replication factor 1,
# filesystem object store.
auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096
  log_level: debug
  grpc_server_max_concurrent_streams: 1000

common:
  instance_addr: 127.0.0.1
  # NOTE(review): /tmp means chunks and rules are lost on reboot — confirm
  # this is intentional (e.g. logs are considered ephemeral for this VM).
  path_prefix: /tmp/loki
  storage:
    filesystem:
      chunks_directory: /tmp/loki/chunks
      rules_directory: /tmp/loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 100

schema_config:
  configs:
    - from: 2020-10-24
      store: tsdb
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h

pattern_ingester:
  enabled: true
  metric_aggregation:
    loki_address: localhost:3100

ruler:
  alertmanager_url: http://localhost:9093

frontend:
  encoding: protobuf

# By default, Loki will send anonymous, but uniquely-identifiable usage and configuration
# analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/
#
# Statistics help us better understand how Loki is used, and they show us performance
# levels for most users. This helps us prioritize features and documentation.
# For more information on what's sent, look at
# https://github.com/grafana/loki/blob/main/pkg/analytics/stats.go
# Refer to the buildReport method to see what goes into a report.
#
# Reporting is disabled here:
analytics:
  reporting_enabled: false

View File

@@ -0,0 +1,29 @@
# Static promtail configuration: ships the systemd journal and nginx logs
# to Loki.
server:
  http_listen_port: 9080
  grpc_listen_port: 0
positions:
  # Scrape positions do not survive a reboot (stored in /tmp).
  filename: /tmp/positions.yaml
clients:
  # NOTE(review): 10.0.0.13 is the nextcloud address elsewhere in this repo;
  # the Loki host is configured as 10.0.0.14 in the metrics module — confirm
  # this address (or whether this file is still used at all).
  - url: http://10.0.0.13:3100/loki/api/v1/push
scrape_configs:
  # All systemd journal entries, labelled with the emitting unit.
  - job_name: journal
    journal:
      max_age: 12h
      labels:
        job: systemd-journal
        host: overwatch
    relabel_configs:
      - source_labels: ["__journal__systemd_unit"]
        target_label: "unit"
  # nginx access/error logs read straight from disk.
  - job_name: nginx
    static_configs:
      - targets:
          - localhost
        labels:
          job: nginx
          __path__: /var/log/nginx/*log

View File

@@ -6,7 +6,6 @@ with lib;
networking = {
hostName = mkDefault "uptimekuma";
useDHCP = false;
nameservers = [ "1.1.1.1" ];
};
imports = [

View File

@@ -17,6 +17,7 @@ with lib;
};
imports = [
inputs.self.nixosModules.malobeo.vpn
../modules/malobeo_user.nix
../modules/sshd.nix
../modules/minimal_tools.nix

View File

@@ -12,15 +12,116 @@
let filter_system = name: if name == utils.lib.system.i686-linux then false else true;
in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems) ( system:
let
baseModules = [
# make flake inputs accessiable in NixOS
{ _module.args.inputs = inputs; }
{
imports = [
({ pkgs, ... }: {
nix = {
extraOptions = ''
experimental-features = nix-command flakes
'';
settings = {
substituters = [
"https://cache.dynamicdiscord.de"
"https://cache.nixos.org/"
];
trusted-public-keys = [
"cache.dynamicdiscord.de:DKueZicqi2NhJJXz9MYgUbiyobMs10fTyHCgAUibRP4="
];
trusted-users = [ "root" "@wheel" ];
};
};
})
sops-nix.nixosModules.sops
#microvm.nixosModules.microvm
];
}
];
defaultModules = baseModules;
makeMicroVM = hostName: ipv4Addr: macAddr: modules: [
self.nixosModules.malobeo.metrics
{
microvm = {
hypervisor = "cloud-hypervisor";
mem = 2560;
shares = [
{
source = "/nix/store";
mountPoint = "/nix/.ro-store";
tag = "store";
proto = "virtiofs";
socket = "store.socket";
}
{
source = "/var/lib/microvms/${hostName}/etc";
mountPoint = "/etc";
tag = "etc";
proto = "virtiofs";
socket = "etc.socket";
}
{
source = "/var/lib/microvms/${hostName}/var";
mountPoint = "/var";
tag = "var";
proto = "virtiofs";
socket = "var.socket";
}
];
interfaces = [
{
type = "tap";
id = "vm-${hostName}";
mac = "${macAddr}";
}
];
};
malobeo.metrics = {
enable = true;
enablePromtail = true;
logNginx = false;
lokiHost = "10.0.0.14";
};
systemd.network.enable = true;
systemd.network.networks."20-lan" = {
matchConfig.Type = "ether";
networkConfig = {
Address = [ "${ipv4Addr}/24" ];
Gateway = "10.0.0.1";
DNS = ["1.1.1.1"];
DHCP = "no";
};
};
}
] ++ defaultModules ++ modules;
pkgs-unstable = nixpkgs-unstable.legacyPackages."${system}";
pkgs = nixpkgs.legacyPackages."${system}";
vmMicroVMOverwrites = options: {
vmMicroVMOverwrites = hostname: options: {
microvm = {
mem = pkgs.lib.mkForce 4096;
hypervisor = pkgs.lib.mkForce "qemu";
socket = pkgs.lib.mkForce null;
shares = pkgs.lib.mkForce ([
#needed for hosts that deploy imperative microvms (for example fanny)
writableStoreOverlay = pkgs.lib.mkIf options.writableStore "/nix/.rw-store";
volumes = pkgs.lib.mkIf options.writableStore [ {
image = "nix-store-overlay.img";
mountPoint = self.nixosConfigurations.${hostname}.config.microvm.writableStoreOverlay;
size = 2048;
} ];
shares = pkgs.lib.mkForce (pkgs.lib.optionals (!options.writableStore) [
{
tag = "ro-store";
source = "/nix/store";
@@ -29,22 +130,33 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
] ++ pkgs.lib.optionals (options.varPath != "") [
{
source = "${options.varPath}";
mountPoint = "/var/lib";
tag = "varlib";
securityModel = "mapped";
mountPoint = "/var";
tag = "var";
}
]);
interfaces = pkgs.lib.mkIf (!options.withNetworking) (pkgs.lib.mkForce [{
type = "user";
id = "eth0";
mac = "02:23:de:ad:be:ef";
}]);
#if networking is disabled forward port 80 to still have access to webservices
forwardPorts = pkgs.lib.mkIf (!options.withNetworking) (pkgs.lib.mkForce [
{ from = "host"; host.port = 8080; guest.port = 80; }
]);
};
fileSystems = {
"/".fsType = pkgs.lib.mkForce "tmpfs";
"/var/lib" = pkgs.lib.mkIf (options.varPath != "") {
depends = [ "/var" ];
};
# prometheus uses a memory mapped file which doesnt seem supported by 9p shares
# therefore we mount a tmpfs inside the datadir
"/var/lib/prometheus2/data" = pkgs.lib.mkIf (hostname == "overwatch" && options.varPath != "") (pkgs.lib.mkForce {
fsType = pkgs.lib.mkForce "tmpfs";
});
};
boot.isContainer = pkgs.lib.mkForce false;
@@ -82,15 +194,59 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
}];
};
buildVM = host: networking: sopsDummy: disableDisko: varPath: (self.nixosConfigurations.${host}.extendModules {
buildVM = host: networking: sopsDummy: disableDisko: varPath: writableStore: (self.nixosConfigurations.${host}.extendModules {
modules = [
(vmMicroVMOverwrites { withNetworking = networking; varPath = "${varPath}"; })
(vmMicroVMOverwrites host {
withNetworking = networking;
varPath = "${varPath}";
writableStore = writableStore; })
(if sopsDummy then (vmSopsOverwrites host) else {})
(if disableDisko then vmDiskoOverwrites else {})
] ++ pkgs.lib.optionals (! self.nixosConfigurations.${host}.config ? microvm) [
microvm.nixosModules.microvm
];
}).config.microvm.declaredRunner;
#microvm.nixosModules.microvm
] ++ pkgs.lib.optionals (self.nixosConfigurations.${host}.config ? services.malobeo.microvm.deployHosts) [
#microvm.nixosModules.host
{
services.malobeo.microvm.deployHosts = pkgs.lib.mkForce [];
systemd.tmpfiles.rules = builtins.concatLists (map (name: [
"q /var/lib/microvms/${name}/var 0755 root root - -"
"q /var/lib/microvms/${name}/etc 0755 root root - -"
"q /var/${name}/wow/it/works 0755 root root - -"
"q /var/lib/${name} 0755 root root - -"
"d /${name} 0755 root root - -"
]) self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts);
microvm.vms =
let
# Map the values to each hostname to then generate an Attrset using listToAttrs
mapperFunc = name: { inherit name; value = {
#pkgs = import self.nixosConfigurations.${name}.config.nixpkgs;
#pkgs = (buildVM name networking sopsDummy false "" false).config.nixpkgs;
#config = (buildVM name networking sopsDummy false "" false);
#pkgs = pkgs;
#config = self.nixosConfigurations.${name};
specialArgs.inputs = inputs;
specialArgs.self = self;
config = {
imports = (makeMicroVM "${name}" "10.0.0.11" "D0:E5:CA:F0:D7:E7" [
#(vmMicroVMOverwrites name {
# withNetworking = true;
# varPath = "";
# writableStore = false; })
(if sopsDummy then (vmSopsOverwrites name) else {})
]);
};
}; };
in
builtins.listToAttrs (map mapperFunc self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts);
}];
});
in
{
devShells.default =
@@ -153,7 +309,8 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
echo "--networking setup interfaces. requires root and hostbridge enabled on the host"
echo "--dummy-secrets run vm with dummy sops secrets"
echo "--no-disko disable disko and initrd secrets. needed for real hosts like fanny"
echo "--varlib path to directory that should be shared as /var/lib. may require root otherwise some systemd units fail within vm. if dir is empty vm will populate"
echo "--writable-store enables writable store. necessary for host with nested imperative microvms like fanny"
echo "--var path to directory that should be shared as /var. may require root otherwise some systemd units fail within vm. if dir is empty vm will populate"
exit 1
}
@@ -168,6 +325,7 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
NETWORK=false
DUMMY_SECRETS=false
NO_DISKO=false
RW_STORE=false
VAR_PATH=""
# check argws
@@ -177,7 +335,8 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
--networking) NETWORK=true ;;
--dummy-secrets) DUMMY_SECRETS=true ;;
--no-disko) NO_DISKO=true ;;
--varlib)
--writable-store) RW_STORE=true ;;
--var)
if [[ -n "$2" && ! "$2" =~ ^- ]]; then
VAR_PATH="$2"
shift
@@ -194,11 +353,12 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
echo "enable networking: $NETWORK"
echo "deploy dummy secrets: $DUMMY_SECRETS"
echo "disable disko and initrd secrets: $NO_DISKO"
echo "use writable store: $RW_STORE"
if [ -n "$VAR_PATH" ]; then
echo "sharing var directory: $VAR_PATH"
fi
${pkgs.nix}/bin/nix run --show-trace --impure --expr "((builtins.getFlake \"$(pwd)\").vmBuilder.x86_64-linux \"$HOSTNAME\" $NETWORK $DUMMY_SECRETS $NO_DISKO \"$VAR_PATH\")"
${pkgs.nix}/bin/nix run --show-trace --impure --expr "((builtins.getFlake \"$(pwd)\").vmBuilder.x86_64-linux \"$HOSTNAME\" $NETWORK $DUMMY_SECRETS $NO_DISKO \"$VAR_PATH\" $RW_STORE).config.microvm.declaredRunner"
'';
};
@@ -206,6 +366,15 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
docs = {
type = "app";
program = builtins.toString (pkgs.writeShellScript "docs" ''
${pkgs.xdg-utils}/bin/xdg-open "${self.packages.${system}.docs}/share/doc/index.html"
'');
};
docsDev = {
type = "app";
program = builtins.toString (pkgs.writeShellScript "docs" ''
echo "needs to run from infrastuctre root folder"
${pkgs.mdbook}/bin/mdbook serve --open ./doc
'');
};
@@ -227,6 +396,7 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
microvm.imports = [ ./machines/modules/malobeo/microvm_host.nix ];
vpn.imports = [ ./machines/modules/malobeo/wireguard.nix ];
initssh.imports = [ ./machines/modules/malobeo/initssh.nix ];
metrics.imports = [ ./machines/modules/malobeo/metrics.nix ];
disko.imports = [ ./machines/modules/disko ];
};