39 Commits

Author SHA1 Message Date
22282c1a2f [run-vm] handle edge case for Prometheus mmapped file on 9p share 2025-01-20 15:43:34 +01:00
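A hedged note on this edge case: Prometheus mmaps its TSDB chunk files, and mmap over a 9p share is unreliable. One possible workaround, sketched here and not necessarily what this commit does, is to keep the data directory off the share, for example on a tmpfs inside the VM (/var/lib/prometheus2 is the NixOS default Prometheus state directory):

{ ... }: {
  # Sketch: keep the mmap-heavy Prometheus TSDB VM-local instead of on the 9p share.
  fileSystems."/var/lib/prometheus2" = {
    device = "none";
    fsType = "tmpfs";
    options = [ "mode=0700" "size=512m" ];
  };
}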
e102d3fb94 [run-vm] use securityModel "mapped" to allow mounting the /var 9p share 2025-01-20 15:43:03 +01:00
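In QEMU's 9p terms, the "mapped" security model stores guest ownership and permission bits in host extended attributes, so the guest can chown files (say, to nginx) without matching ownership on the host. A minimal sketch of such a share in microvm.nix style; the securityModel option name is taken from the commit message, and the host path is hypothetical:

{ ... }: {
  microvm.shares = [ {
    tag = "var";
    source = "/tmp/testvm-var";  # hypothetical host directory
    mountPoint = "/var";
    proto = "9p";
    # "mapped": ownership/permissions live in host xattrs, so this works
    # even when the user running the VM cannot chown files on the host
    securityModel = "mapped";
  } ];
}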
a6b1994938 [nix] change .#docs to just start the browser and use .#docsDev for local development 2025-01-20 13:09:16 +01:00
b381173dad [docs] add run-vm examples 2025-01-20 12:27:05 +01:00
7fee35d3d7 [run-vm] allow sharing of /var/lib
Sharing /var somehow doesn't work: nginx, for example, fails because it lacks permissions to access /var/log/nginx, and this happens even when run-vm is started as root. That's why only /var/lib is shared, which still allows application persistence between tests.
2025-01-20 12:10:31 +01:00
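A sketch of the narrowed share described above: only service state under /var/lib is mapped in, while /var/log and the rest of /var stay VM-local, so units like nginx can set up their log directories normally. The host path is hypothetical; run-vm's --var flag supplies it:

{ ... }: {
  microvm.shares = [ {
    tag = "var";
    source = "/tmp/testvm-var";  # hypothetical; passed via run-vm --var
    mountPoint = "/var/lib";     # narrowed from /var
    proto = "9p";
    securityModel = "mapped";
  } ];
  # /var/log is intentionally not shared: nginx failed on /var/log/nginx
  # permissions whenever the whole of /var was a 9p share
}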
4d477ce648 [run-vm] add flag to disable disko
Needed to run fanny as a VM.
2025-01-20 03:06:58 +01:00
74885a7ce1 [nix] add run-vm script 2025-01-20 02:37:19 +01:00
eafe7a6b95 [vpn] update dummy secrets 2025-01-19 23:48:02 +01:00
fe9ff06aae [lucia] init dummy secrets 2025-01-19 23:47:50 +01:00
ba6e219d64 [fanny] init dummy secrets 2025-01-19 23:47:41 +01:00
7431209bc2 [durruti] rm secrets.yaml - currently empty 2025-01-19 23:44:51 +01:00
015c326042 [nix] rm vm interface overwrite 2025-01-19 23:40:15 +01:00
5f780e17eb [nextcloud] add dummy secrets 2025-01-19 23:39:51 +01:00
fda348f5da [sops] add a dummy key to allow secret usage within test vms 2025-01-19 22:55:54 +01:00
36ec5f5837 [sops] test sharing hostkey with vm 2025-01-19 22:55:52 +01:00
68b3da7df8 [fanny] proxypass cloud.malobeo.org 2025-01-19 14:53:39 +01:00
affcc71eb1 [fanny] deploy nextcloud 2025-01-19 14:52:33 +01:00
4462856fa0 [nextcloud] rm obsolete nameserver 2025-01-19 14:52:20 +01:00
5352c1fa4d [docs] make readme the index
Still, most of it is quite out of date...
2025-01-19 14:30:58 +01:00
ahtlon fabf48a5c0 [nextcloud] nextcloud works now 2025-01-19 14:22:08 +01:00
ahtlon 617c177892 [nextcloud] flake update because for some reason the sha changed 2025-01-19 14:22:08 +01:00
ahtlon 9b4cd02e53 [nextcloud] enable postgres, redis, change domain 2025-01-19 14:22:08 +01:00
ahtlon fab1b18263 [nextcloud] rm discourse 2025-01-19 14:22:08 +01:00
cbd041f563 [nextcloud] fix hostname 2025-01-19 14:22:08 +01:00
ahtlon ef25c686b4 add nextcloud collectives 2025-01-19 14:22:08 +01:00
ahtlon 66392ca2c2 login works 2025-01-19 14:22:08 +01:00
ahtlon 9afa8987e7 nextcloud minimal 2025-01-19 14:22:08 +01:00
ahtlon 0239733e62 sops.... 2025-01-19 14:22:08 +01:00
ahtlon d9cf3588bf Start over but right this time 2025-01-19 14:22:08 +01:00
ahtlon 2500b8ab9a basic discourse example 2025-01-19 14:22:08 +01:00
ahtlon 52824e39ee with nix flake check the hydraJobs output is evaluated in the same way as Hydra's hydra-eval-jobs 2025-01-18 23:41:53 +01:00
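As the message says, nix flake check evaluates a flake's hydraJobs output the same way Hydra's hydra-eval-jobs does, so exposing the NixOS systems there gets them evaluated in CI. A minimal sketch of such an output inside the flake's outputs function (the exact attrset in this repo's flake.nix may differ):

{
  # evaluated both by Hydra and by nix flake check
  hydraJobs = {
    fanny = self.nixosConfigurations.fanny.config.system.build.toplevel;
    durruti = self.nixosConfigurations.durruti.config.system.build.toplevel;
  };
}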
ahtlon 8793120436 Only run on push 2025-01-18 23:40:11 +01:00
ahtlon 950ada1e10 [actions] Add flake check 2025-01-18 22:24:21 +01:00
ahtlon 1e269966ff Merge branch 'fix-flake-check'
nix flake check and nix flake show now work again
2025-01-18 22:02:19 +01:00
ahtlon 3861daaf76 [modules] move microvm module import from makeMicroVM to baseModules 2025-01-18 22:01:06 +01:00
ahtlon 3a332e77d1 [scripts] move packages to legacyPackages 2025-01-18 21:45:48 +01:00
ahtlon 79c311b45d Merge branch 'issue51'
Fixes #51
2025-01-18 20:41:06 +01:00
ahtlon 850070f987 [scripts] check for flake.nix 2025-01-18 20:39:16 +01:00
ahtlon d242562544 [packages] make scripts available in shell without nix run 2025-01-18 20:04:22 +01:00
15 changed files with 12 additions and 25526 deletions

View File

@@ -43,7 +43,6 @@ let
defaultModules = baseModules;
makeMicroVM = hostName: ipv4Addr: macAddr: modules: [
self.nixosModules.malobeo.metrics
{
microvm = {
hypervisor = "cloud-hypervisor";
@@ -81,13 +80,6 @@ let
];
};
malobeo.metrics = {
enable = true;
enablePromtail = true;
logNginx = false;
lokiHost = "10.0.0.14";
};
systemd.network.enable = true;
systemd.network.networks."20-lan" = {
@@ -187,19 +179,11 @@ in
];
};
overwatch = nixosSystem {
system = "x86_64-linux";
specialArgs.inputs = inputs;
specialArgs.self = self;
modules = makeMicroVM "overwatch" "10.0.0.14" "D0:E5:CA:F0:D7:E0" [
./overwatch/configuration.nix
];
};
testvm = nixosSystem {
system = "x86_64-linux";
specialArgs.inputs = inputs;
specialArgs.self = self;
modules = defaultModules ++ [ ./testvm ];
};
}

View File

@@ -6,6 +6,7 @@ with lib;
networking = {
hostName = mkDefault "durruti";
useDHCP = false;
nameservers = [ "1.1.1.1" ];
};
networking.firewall.allowedTCPPorts = [ 8080 ];

View File

@@ -53,7 +53,7 @@ in
};
services.malobeo.microvm.enableHostBridge = true;
services.malobeo.microvm.deployHosts = [ "infradocs" "nextcloud" "overwatch" ];
services.malobeo.microvm.deployHosts = [ "infradocs" "nextcloud" ];
networking = {
firewall = {

View File

@@ -6,6 +6,7 @@ with lib;
networking = {
hostName = mkDefault "infradocs";
useDHCP = false;
nameservers = [ "1.1.1.1" ];
};
imports = [
@@ -14,12 +15,6 @@ with lib;
../modules/sshd.nix
];
networking.firewall.allowedTCPPorts = [ 9002 ];
malobeo.metrics.logNginx = lib.mkForce true;
users.users.promtail.extraGroups = [ "nginx" "systemd-journal" ];
system.stateVersion = "22.11"; # Did you read the comment?
}

View File

@@ -1,56 +0,0 @@
{ config, lib, pkgs, ... }:
let
cfg = config.malobeo.metrics;
in
{
options.malobeo.metrics = {
enable = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable sharing metrics";
};
enablePromtail = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Enable sharing logs";
};
logNginx = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Share nginx logs";
};
lokiHost = lib.mkOption {
type = lib.types.str;
default = "10.0.0.14";
description = "Address of loki host";
};
};
config = lib.mkIf (cfg.enable) {
networking.firewall.allowedTCPPorts = [ 9002 ];
services.prometheus = {
exporters = {
node = {
enable = true;
enabledCollectors = [ "systemd" "processes" ];
port = 9002;
};
};
};
services.promtail = {
enable = cfg.enablePromtail;
configFile = import ./promtail_config.nix {
lokiAddress = cfg.lokiHost;
logNginx = cfg.logNginx;
config = config;
pkgs = pkgs;
};
};
users.users.promtail.extraGroups = [ "systemd-journal" ] ++ (lib.optionals cfg.logNginx [ "nginx" ]) ;
};
}

View File

@@ -86,12 +86,6 @@ in
in
builtins.listToAttrs (map mapperFunc cfg.deployHosts);
systemd.tmpfiles.rules = builtins.concatLists (map (name: [
"d /var/lib/microvms/${name}/var 0755 root root - -"
"d /var/lib/microvms/${name}/etc 0755 root root - -"
"d /${name} 0755 root root - -"
]) cfg.deployHosts);
systemd.services = builtins.foldl' (services: name: services // {
"microvm-update@${name}" = {
description = "Update MicroVMs automatically";

View File

@@ -1,49 +0,0 @@
{ logNginx, lokiAddress, config, pkgs, ... }:
let
basecfg = ''
server:
http_listen_port: 9080
grpc_listen_port: 0
positions:
filename: /tmp/positions.yaml
clients:
- url: http://${lokiAddress}:3100/loki/api/v1/push
'';
withNginx = ''
scrape_configs:
- job_name: journal
journal:
max_age: 12h
labels:
job: systemd-journal
host: ${config.networking.hostName}
relabel_configs:
- source_labels: ["__journal__systemd_unit"]
target_label: "unit"
- job_name: nginx
static_configs:
- targets:
- localhost
labels:
job: nginx
__path__: /var/log/nginx/*log
'';
withoutNginx = ''
scrape_configs:
- job_name: journal
journal:
max_age: 12h
labels:
job: systemd-journal
host: ${config.networking.hostName}
relabel_configs:
- source_labels: ["__journal__systemd_unit"]
target_label: "unit"
'';
in
pkgs.writeText "promtailcfg.yaml" (if logNginx then ''${basecfg}${withNginx}'' else ''${basecfg}${withoutNginx}'')

View File

@@ -1,115 +0,0 @@
{ config, lib, pkgs, inputs, ... }:
with lib;
{
networking = {
hostName = mkDefault "overwatch";
useDHCP = false;
};
imports = [
../modules/malobeo_user.nix
../modules/sshd.nix
];
networking.firewall.allowedTCPPorts = [ 80 9080 9001 3100 ];
services.grafana = {
enable = true;
domain = "grafana.malobeo.org";
port = 2342;
addr = "127.0.0.1";
provision.datasources.settings = {
apiVersion = 1;
datasources = [
{
name = "loki";
type = "loki";
access = "proxy";
uid = "eeakiack8nqwwc";
url = "http://localhost:3100";
editable = false;
}
{
name = "prometheus";
type = "prometheus";
access = "proxy";
uid = "feakib1gq7ugwc";
url = "http://localhost:9001";
editable = false;
}
];
};
provision.dashboards.settings = {
apiVersion = 1;
providers = [{
name = "default";
options.path = ./dashboards;
}];
};
};
services.nginx = {
enable = true;
virtualHosts.${config.services.grafana.domain} = {
locations."/" = {
proxyPass = "http://127.0.0.1:${toString config.services.grafana.port}";
proxyWebsockets = true;
extraConfig = ''
proxy_set_header Host $host;
'';
};
};
};
services.prometheus = {
enable = true;
port = 9001;
scrapeConfigs = [
{
job_name = "overwatch";
static_configs = [{
targets = [ "127.0.0.1:9002" ];
}];
}
{
job_name = "durruti";
static_configs = [{
targets = [ "10.0.0.5:9002" ];
}];
}
{
job_name = "infradocs";
static_configs = [{
targets = [ "10.0.0.11:9002" ];
}];
}
{
job_name = "nextcloud";
static_configs = [{
targets = [ "10.0.0.13:9002" ];
}];
}
# add vpn - check how to reach it first. most probably 10.100.0.1
];
};
services.loki = {
enable = true;
configFile = ./loki.yaml;
};
users.users.promtail.extraGroups = [ "nginx" "systemd-journal" ];
system.stateVersion = "22.11"; # Did you read the comment?
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,60 +0,0 @@
auth_enabled: false
server:
http_listen_port: 3100
grpc_listen_port: 9096
log_level: debug
grpc_server_max_concurrent_streams: 1000
common:
instance_addr: 127.0.0.1
path_prefix: /tmp/loki
storage:
filesystem:
chunks_directory: /tmp/loki/chunks
rules_directory: /tmp/loki/rules
replication_factor: 1
ring:
kvstore:
store: inmemory
query_range:
results_cache:
cache:
embedded_cache:
enabled: true
max_size_mb: 100
schema_config:
configs:
- from: 2020-10-24
store: tsdb
object_store: filesystem
schema: v13
index:
prefix: index_
period: 24h
pattern_ingester:
enabled: true
metric_aggregation:
loki_address: localhost:3100
ruler:
alertmanager_url: http://localhost:9093
frontend:
encoding: protobuf
# By default, Loki will send anonymous, but uniquely-identifiable usage and configuration
# analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/
#
# Statistics help us better understand how Loki is used, and they show us performance
# levels for most users. This helps us prioritize features and documentation.
# For more information on what's sent, look at
# https://github.com/grafana/loki/blob/main/pkg/analytics/stats.go
# Refer to the buildReport method to see what goes into a report.
#
# If you would like to disable reporting, uncomment the following lines:
analytics:
reporting_enabled: false

View File

@@ -1,29 +0,0 @@
server:
http_listen_port: 9080
grpc_listen_port: 0
positions:
filename: /tmp/positions.yaml
clients:
- url: http://10.0.0.13:3100/loki/api/v1/push
scrape_configs:
- job_name: journal
journal:
max_age: 12h
labels:
job: systemd-journal
host: overwatch
relabel_configs:
- source_labels: ["__journal__systemd_unit"]
target_label: "unit"
- job_name: nginx
static_configs:
- targets:
- localhost
labels:
job: nginx
__path__: /var/log/nginx/*log

View File

@@ -6,6 +6,7 @@ with lib;
networking = {
hostName = mkDefault "uptimekuma";
useDHCP = false;
nameservers = [ "1.1.1.1" ];
};
imports = [

View File

@@ -17,7 +17,6 @@ with lib;
};
imports = [
inputs.self.nixosModules.malobeo.vpn
../modules/malobeo_user.nix
../modules/sshd.nix
../modules/minimal_tools.nix

View File

@@ -12,97 +12,6 @@
let filter_system = name: if name == utils.lib.system.i686-linux then false else true;
in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems) ( system:
let
baseModules = [
# make flake inputs accessible in NixOS
{ _module.args.inputs = inputs; }
{
imports = [
({ pkgs, ... }: {
nix = {
extraOptions = ''
experimental-features = nix-command flakes
'';
settings = {
substituters = [
"https://cache.dynamicdiscord.de"
"https://cache.nixos.org/"
];
trusted-public-keys = [
"cache.dynamicdiscord.de:DKueZicqi2NhJJXz9MYgUbiyobMs10fTyHCgAUibRP4="
];
trusted-users = [ "root" "@wheel" ];
};
};
})
sops-nix.nixosModules.sops
#microvm.nixosModules.microvm
];
}
];
defaultModules = baseModules;
makeMicroVM = hostName: ipv4Addr: macAddr: modules: [
self.nixosModules.malobeo.metrics
{
microvm = {
hypervisor = "cloud-hypervisor";
mem = 2560;
shares = [
{
source = "/nix/store";
mountPoint = "/nix/.ro-store";
tag = "store";
proto = "virtiofs";
socket = "store.socket";
}
{
source = "/var/lib/microvms/${hostName}/etc";
mountPoint = "/etc";
tag = "etc";
proto = "virtiofs";
socket = "etc.socket";
}
{
source = "/var/lib/microvms/${hostName}/var";
mountPoint = "/var";
tag = "var";
proto = "virtiofs";
socket = "var.socket";
}
];
interfaces = [
{
type = "tap";
id = "vm-${hostName}";
mac = "${macAddr}";
}
];
};
malobeo.metrics = {
enable = true;
enablePromtail = true;
logNginx = false;
lokiHost = "10.0.0.14";
};
systemd.network.enable = true;
systemd.network.networks."20-lan" = {
matchConfig.Type = "ether";
networkConfig = {
Address = [ "${ipv4Addr}/24" ];
Gateway = "10.0.0.1";
DNS = ["1.1.1.1"];
DHCP = "no";
};
};
}
] ++ defaultModules ++ modules;
pkgs-unstable = nixpkgs-unstable.legacyPackages."${system}";
pkgs = nixpkgs.legacyPackages."${system}";
@@ -111,17 +20,7 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
mem = pkgs.lib.mkForce 4096;
hypervisor = pkgs.lib.mkForce "qemu";
socket = pkgs.lib.mkForce null;
#needed for hosts that deploy imperative microvms (for example fanny)
writableStoreOverlay = pkgs.lib.mkIf options.writableStore "/nix/.rw-store";
volumes = pkgs.lib.mkIf options.writableStore [ {
image = "nix-store-overlay.img";
mountPoint = self.nixosConfigurations.${hostname}.config.microvm.writableStoreOverlay;
size = 2048;
} ];
shares = pkgs.lib.mkForce (pkgs.lib.optionals (!options.writableStore) [
shares = pkgs.lib.mkForce ([
{
tag = "ro-store";
source = "/nix/store";
@@ -135,18 +34,11 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
tag = "var";
}
]);
interfaces = pkgs.lib.mkIf (!options.withNetworking) (pkgs.lib.mkForce [{
type = "user";
id = "eth0";
mac = "02:23:de:ad:be:ef";
}]);
#if networking is disabled forward port 80 to still have access to webservices
forwardPorts = pkgs.lib.mkIf (!options.withNetworking) (pkgs.lib.mkForce [
{ from = "host"; host.port = 8080; guest.port = 80; }
]);
};
fileSystems = {
@@ -194,59 +86,15 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
}];
};
buildVM = host: networking: sopsDummy: disableDisko: varPath: writableStore: (self.nixosConfigurations.${host}.extendModules {
buildVM = host: networking: sopsDummy: disableDisko: varPath: (self.nixosConfigurations.${host}.extendModules {
modules = [
(vmMicroVMOverwrites host {
withNetworking = networking;
varPath = "${varPath}";
writableStore = writableStore; })
(vmMicroVMOverwrites host { withNetworking = networking; varPath = "${varPath}"; })
(if sopsDummy then (vmSopsOverwrites host) else {})
(if disableDisko then vmDiskoOverwrites else {})
] ++ pkgs.lib.optionals (! self.nixosConfigurations.${host}.config ? microvm) [
#microvm.nixosModules.microvm
] ++ pkgs.lib.optionals (self.nixosConfigurations.${host}.config ? services.malobeo.microvm.deployHosts) [
#microvm.nixosModules.host
{
services.malobeo.microvm.deployHosts = pkgs.lib.mkForce [];
systemd.tmpfiles.rules = builtins.concatLists (map (name: [
"q /var/lib/microvms/${name}/var 0755 root root - -"
"q /var/lib/microvms/${name}/etc 0755 root root - -"
"q /var/${name}/wow/it/works 0755 root root - -"
"q /var/lib/${name} 0755 root root - -"
"d /${name} 0755 root root - -"
]) self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts);
microvm.vms =
let
# Map the values to each hostname to then generate an Attrset using listToAttrs
mapperFunc = name: { inherit name; value = {
#pkgs = import self.nixosConfigurations.${name}.config.nixpkgs;
#pkgs = (buildVM name networking sopsDummy false "" false).config.nixpkgs;
#config = (buildVM name networking sopsDummy false "" false);
#pkgs = pkgs;
#config = self.nixosConfigurations.${name};
specialArgs.inputs = inputs;
specialArgs.self = self;
config = {
imports = (makeMicroVM "${name}" "10.0.0.11" "D0:E5:CA:F0:D7:E7" [
#(vmMicroVMOverwrites name {
# withNetworking = true;
# varPath = "";
# writableStore = false; })
(if sopsDummy then (vmSopsOverwrites name) else {})
]);
};
}; };
in
builtins.listToAttrs (map mapperFunc self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts);
}];
});
microvm.nixosModules.microvm
];
}).config.microvm.declaredRunner;
in
{
devShells.default =
@@ -309,7 +157,6 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
echo "--networking setup interfaces. requires root and hostbridge enabled on the host"
echo "--dummy-secrets run vm with dummy sops secrets"
echo "--no-disko disable disko and initrd secrets. needed for real hosts like fanny"
echo "--writable-store enables writable store. necessary for host with nested imperative microvms like fanny"
echo "--var path to directory that should be shared as /var. may require root otherwise some systemd units fail within vm. if dir is empty vm will populate"
exit 1
}
@@ -325,7 +172,6 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
NETWORK=false
DUMMY_SECRETS=false
NO_DISKO=false
RW_STORE=false
VAR_PATH=""
# check args
@@ -335,7 +181,6 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
--networking) NETWORK=true ;;
--dummy-secrets) DUMMY_SECRETS=true ;;
--no-disko) NO_DISKO=true ;;
--writable-store) RW_STORE=true ;;
--var)
if [[ -n "$2" && ! "$2" =~ ^- ]]; then
VAR_PATH="$2"
@@ -353,12 +198,11 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
echo "enable networking: $NETWORK"
echo "deploy dummy secrets: $DUMMY_SECRETS"
echo "disable disko and initrd secrets: $NO_DISKO"
echo "use writable store: $RW_STORE"
if [ -n "$VAR_PATH" ]; then
echo "sharing var directory: $VAR_PATH"
fi
${pkgs.nix}/bin/nix run --show-trace --impure --expr "((builtins.getFlake \"$(pwd)\").vmBuilder.x86_64-linux \"$HOSTNAME\" $NETWORK $DUMMY_SECRETS $NO_DISKO \"$VAR_PATH\" $RW_STORE).config.microvm.declaredRunner"
${pkgs.nix}/bin/nix run --show-trace --impure --expr "((builtins.getFlake \"$(pwd)\").vmBuilder.x86_64-linux \"$HOSTNAME\" $NETWORK $DUMMY_SECRETS $NO_DISKO \"$VAR_PATH\")"
'';
};
@@ -396,7 +240,6 @@ in (utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems
microvm.imports = [ ./machines/modules/malobeo/microvm_host.nix ];
vpn.imports = [ ./machines/modules/malobeo/wireguard.nix ];
initssh.imports = [ ./machines/modules/malobeo/initssh.nix ];
metrics.imports = [ ./machines/modules/malobeo/metrics.nix ];
disko.imports = [ ./machines/modules/disko ];
};