{ self, utils, nixpkgs, nixpkgs-unstable, nixos-generators, sops-nix, microvm, ... } @inputs:

# filter i686-linux from defaultSystems to run nix flake check successfully
let
  filter_system = name: if name == utils.lib.system.i686-linux then false else true;
in
(utils.lib.eachSystem (builtins.filter filter_system utils.lib.defaultSystems) (system:
  let
    baseModules = [
      # make flake inputs accessible in NixOS
      { _module.args.inputs = inputs; }
      {
        imports = [
          ({ pkgs, ... }: {
            nix = {
              extraOptions = ''
                experimental-features = nix-command flakes
              '';
              settings = {
                substituters = [
                  "https://cache.dynamicdiscord.de"
                  "https://cache.nixos.org/"
                ];
                trusted-public-keys = [
                  "cache.dynamicdiscord.de:DKueZicqi2NhJJXz9MYgUbiyobMs10fTyHCgAUibRP4="
                ];
                trusted-users = [ "root" "@wheel" ];
              };
            };
          })
          sops-nix.nixosModules.sops
          #microvm.nixosModules.microvm
        ];
      }
    ];

    defaultModules = baseModules;

    # module list for a declaratively deployed cloud-hypervisor microvm guest
    # with virtiofs shares, static networking and the malobeo metrics module
    makeMicroVM = hostName: ipv4Addr: macAddr: modules: [
      self.nixosModules.malobeo.metrics
      {
        microvm = {
          hypervisor = "cloud-hypervisor";
          mem = 2560;
          shares = [
            {
              source = "/nix/store";
              mountPoint = "/nix/.ro-store";
              tag = "store";
              proto = "virtiofs";
              socket = "store.socket";
            }
            {
              source = "/var/lib/microvms/${hostName}/etc";
              mountPoint = "/etc";
              tag = "etc";
              proto = "virtiofs";
              socket = "etc.socket";
            }
            {
              source = "/var/lib/microvms/${hostName}/var";
              mountPoint = "/var";
              tag = "var";
              proto = "virtiofs";
              socket = "var.socket";
            }
          ];
          interfaces = [
            {
              type = "tap";
              id = "vm-${hostName}";
              mac = "${macAddr}";
            }
          ];
        };
        malobeo.metrics = {
          enable = true;
          enablePromtail = true;
          logNginx = false;
          lokiHost = "10.0.0.14";
        };
        systemd.network.enable = true;
        systemd.network.networks."20-lan" = {
          matchConfig.Type = "ether";
          networkConfig = {
            Address = [ "${ipv4Addr}/24" ];
            Gateway = "10.0.0.1";
            DNS = [ "1.1.1.1" ];
            DHCP = "no";
          };
        };
      }
    ] ++ defaultModules ++ modules;

    pkgs-unstable = nixpkgs-unstable.legacyPackages."${system}";
    pkgs = nixpkgs.legacyPackages."${system}";

    # overrides that turn an existing host configuration into a local qemu test vm
    vmMicroVMOverwrites = hostname: options: {
      microvm = {
        mem = pkgs.lib.mkForce 4096;
        hypervisor = pkgs.lib.mkForce "qemu";
        socket = pkgs.lib.mkForce null;
        # needed for hosts that deploy imperative microvms (for example fanny)
        writableStoreOverlay = pkgs.lib.mkIf options.writableStore "/nix/.rw-store";
        volumes = pkgs.lib.mkIf options.writableStore [
          {
            image = "nix-store-overlay.img";
            mountPoint = self.nixosConfigurations.${hostname}.config.microvm.writableStoreOverlay;
            size = 2048;
          }
        ];
        shares = pkgs.lib.mkForce (pkgs.lib.optionals (!options.writableStore) [
          {
            tag = "ro-store";
            source = "/nix/store";
            mountPoint = "/nix/.ro-store";
          }
        ] ++ pkgs.lib.optionals (options.varPath != "") [
          {
            source = "${options.varPath}";
            securityModel = "mapped";
            mountPoint = "/var";
            tag = "var";
          }
        ]);
        interfaces = pkgs.lib.mkIf (!options.withNetworking) (pkgs.lib.mkForce [{
          type = "user";
          id = "eth0";
          mac = "02:23:de:ad:be:ef";
        }]);
        # if networking is disabled, forward port 80 to still have access to web services
        forwardPorts = pkgs.lib.mkIf (!options.withNetworking) (pkgs.lib.mkForce [
          {
            from = "host";
            host.port = 8080;
            guest.port = 80;
          }
        ]);
      };
      fileSystems = {
        "/".fsType = pkgs.lib.mkForce "tmpfs";
        # prometheus uses a memory-mapped file which doesn't seem to be supported by 9p shares,
        # therefore we mount a tmpfs inside the data dir
        "/var/lib/prometheus2/data" = pkgs.lib.mkIf (hostname == "overwatch" && options.varPath != "")
          (pkgs.lib.mkForce { fsType = pkgs.lib.mkForce "tmpfs"; });
      };
      boot.isContainer = pkgs.lib.mkForce false;
      services.timesyncd.enable = false;
      users.users.root.password = "";
      services.getty.helpLine = ''
        Log in as "root" with an empty password.
        Use "reboot" to shut qemu down.
      '';
    };

    # disable disko and the initrd secrets so that real hosts can boot as a vm
    vmDiskoOverwrites = {
      boot.initrd = {
        secrets = pkgs.lib.mkForce { };
        network.ssh.enable = pkgs.lib.mkForce false;
      };
      malobeo.disks.enable = pkgs.lib.mkForce false;
      networking.hostId = "a3c3101f";
    };

    # replace the real sops secrets with a dummy sops file and a development host key
    vmSopsOverwrites = host: {
      sops.defaultSopsFile = pkgs.lib.mkForce ./machines/${host}/dummy.yaml;
      environment.etc = {
        devHostKey = {
          source = ./machines/secrets/devkey_ed25519;
          mode = "0600";
        };
      };
      services.openssh.hostKeys = [{
        path = "/etc/devHostKey";
        type = "ed25519";
      }];
    };

    # extend an existing nixosConfiguration with the vm overrides above;
    # exposed below as vmBuilder and used by the run-vm script
    buildVM = host: networking: sopsDummy: disableDisko: varPath: writableStore:
      (self.nixosConfigurations.${host}.extendModules {
        modules = [
          (vmMicroVMOverwrites host {
            withNetworking = networking;
            varPath = "${varPath}";
            writableStore = writableStore;
          })
          (if sopsDummy then (vmSopsOverwrites host) else { })
          (if disableDisko then vmDiskoOverwrites else { })
        ] ++ pkgs.lib.optionals (! self.nixosConfigurations.${host}.config ? microvm) [
          #microvm.nixosModules.microvm
        ] ++ pkgs.lib.optionals (self.nixosConfigurations.${host}.config ? services.malobeo.microvm.deployHosts) [
          #microvm.nixosModules.host
          {
            services.malobeo.microvm.deployHosts = pkgs.lib.mkForce [ ];
            systemd.tmpfiles.rules = builtins.concatLists (map
              (name: [
                "q /var/lib/microvms/${name}/var 0755 root root - -"
                "q /var/lib/microvms/${name}/etc 0755 root root - -"
                "q /var/${name}/wow/it/works 0755 root root - -"
                "q /var/lib/${name} 0755 root root - -"
                "d /${name} 0755 root root - -"
              ])
              self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts);
            microvm.vms =
              let
                # map each hostname to a name/value pair, then build an attrset with listToAttrs
                mapperFunc = name: {
                  inherit name;
                  value = {
                    #pkgs = import self.nixosConfigurations.${name}.config.nixpkgs;
                    #pkgs = (buildVM name networking sopsDummy false "" false).config.nixpkgs;
                    #config = (buildVM name networking sopsDummy false "" false);
                    #pkgs = pkgs;
                    #config = self.nixosConfigurations.${name};
                    specialArgs.inputs = inputs;
                    specialArgs.self = self;
                    config = {
                      imports = (makeMicroVM "${name}" "10.0.0.11" "D0:E5:CA:F0:D7:E7" [
                        #(vmMicroVMOverwrites name {
                        #  withNetworking = true;
                        #  varPath = "";
                        #  writableStore = false; })
                        (if sopsDummy then (vmSopsOverwrites name) else { })
                      ]);
                    };
                  };
                };
              in
              builtins.listToAttrs (map mapperFunc self.nixosConfigurations.${host}.config.services.malobeo.microvm.deployHosts);
          }
        ];
      });
  in
  {
    devShells.default =
      let
        sops = sops-nix.packages."${pkgs.system}";
        microvmpkg = microvm.packages."${pkgs.system}";
        installed = builtins.attrNames self.legacyPackages."${pkgs.system}".scripts;
      in
      pkgs.mkShell {
        sopsPGPKeyDirs = [
          "./machines/secrets/keys/hosts"
          "./machines/secrets/keys/users"
        ];
        nativeBuildInputs = [
          sops.ssh-to-pgp
          sops.sops-import-keys-hook
          sops.sops-init-gpg-key
          pkgs.sops
          pkgs.age
          pkgs.python310Packages.grip
          pkgs.mdbook
          microvmpkg.microvm
        ];
        packages = builtins.map (pkgName: self.legacyPackages."${pkgs.system}".scripts.${pkgName}) installed;
        shellHook = ''echo "Available scripts: ${builtins.concatStringsSep " " installed}"'';
      };

    legacyPackages = {
      scripts.remote-install = pkgs.writeShellScriptBin "remote-install" (builtins.readFile ./scripts/remote-install-encrypt.sh);
      scripts.boot-unlock = pkgs.writeShellScriptBin "boot-unlock" (builtins.readFile ./scripts/unlock-boot.sh);
      scripts.run-vm = self.packages.${system}.run-vm;
    };

    vmBuilder = buildVM;

    packages = {
      docs = pkgs.stdenv.mkDerivation {
        name = "malobeo-docs";
        phases = [ "buildPhase" ];
        buildInputs = [ pkgs.mdbook ];
        inputs = pkgs.lib.sourceFilesBySuffices ./doc/. [ ".md" ".toml" ];
        buildPhase = ''
          dest=$out/share/doc
          mkdir -p $dest
          cp -r --no-preserve=all $inputs/* ./
          mdbook build
          ls
          cp -r ./book/* $dest
        '';
      };

      run-vm = pkgs.writeShellScriptBin "run-vm" ''
        usage() {
          echo "Usage: run-vm HOSTNAME [--networking] [--dummy-secrets] [--no-disko] [--writable-store] [--var PATH]"
          echo "ATTENTION: This script must be run from the flake's root directory"
          echo "--networking      set up interfaces; requires root and a host bridge enabled on the host"
          echo "--dummy-secrets   run the vm with dummy sops secrets"
          echo "--no-disko        disable disko and initrd secrets; needed for real hosts like fanny"
          echo "--writable-store  enable a writable store; necessary for hosts with nested imperative microvms like fanny"
          echo "--var PATH        directory to share as /var; may require root, otherwise some systemd units fail inside the vm; if the directory is empty the vm will populate it"
          exit 1
        }

        # check that at least one argument was given
        if [ "$#" -lt 1 ]; then
          usage
        fi

        HOSTNAME=$1

        # optional arguments
        NETWORK=false
        DUMMY_SECRETS=false
        NO_DISKO=false
        RW_STORE=false
        VAR_PATH=""

        # parse the remaining arguments
        shift
        while [[ "$#" -gt 0 ]]; do
          case $1 in
            --networking) NETWORK=true ;;
            --dummy-secrets) DUMMY_SECRETS=true ;;
            --no-disko) NO_DISKO=true ;;
            --writable-store) RW_STORE=true ;;
            --var)
              if [[ -n "$2" && ! "$2" =~ ^- ]]; then
                VAR_PATH="$2"
                shift
              else
                echo "Error: --var requires a non-empty string argument."
                usage
              fi
              ;;
            *) echo "Unknown argument: $1"; usage ;;
          esac
          shift
        done

        echo "starting host $HOSTNAME"
        echo "enable networking: $NETWORK"
        echo "deploy dummy secrets: $DUMMY_SECRETS"
        echo "disable disko and initrd secrets: $NO_DISKO"
        echo "use writable store: $RW_STORE"
        if [ -n "$VAR_PATH" ]; then
          echo "sharing var directory: $VAR_PATH"
        fi

        ${pkgs.nix}/bin/nix run --show-trace --impure --expr "((builtins.getFlake \"$(pwd)\").vmBuilder.x86_64-linux \"$HOSTNAME\" $NETWORK $DUMMY_SECRETS $NO_DISKO \"$VAR_PATH\" $RW_STORE).config.microvm.declaredRunner"
      '';
    };

    apps = {
      docs = {
        type = "app";
        program = builtins.toString (pkgs.writeShellScript "docs" ''
          ${pkgs.xdg-utils}/bin/xdg-open "${self.packages.${system}.docs}/share/doc/index.html"
        '');
      };
      docsDev = {
        type = "app";
        program = builtins.toString (pkgs.writeShellScript "docs" ''
          echo "needs to be run from the infrastructure root folder"
          ${pkgs.mdbook}/bin/mdbook serve --open ./doc
        '');
      };
      run-vm = {
        type = "app";
        program = "${self.packages.${system}.run-vm}/bin/run-vm";
      };
    };
  }))
// {
  nixosConfigurations = import ./machines/configuration.nix (inputs // { inherit inputs; self = self; });

  nixosModules.malobeo = {
    host.imports = [ ./machines/durruti/host_config.nix ];
    microvm.imports = [ ./machines/modules/malobeo/microvm_host.nix ];
    vpn.imports = [ ./machines/modules/malobeo/wireguard.nix ];
    initssh.imports = [ ./machines/modules/malobeo/initssh.nix ];
    metrics.imports = [ ./machines/modules/malobeo/metrics.nix ];
    disko.imports = [ ./machines/modules/disko ];
  };

  hydraJobs = nixpkgs.lib.mapAttrs (_: nixpkgs.lib.hydraJob) (
    let
      getBuildEntry = name: nixosSystem:
        if (nixpkgs.lib.hasPrefix "sdImage" name)
        then nixosSystem.config.system.build.sdImage
        else nixosSystem.config.system.build.toplevel;
    in
    nixpkgs.lib.mapAttrs getBuildEntry self.nixosConfigurations
  );
}
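/*
  Usage sketch (illustrative only, not part of the evaluated flake): the dev shell,
  the run-vm wrapper and the docs app defined above can be exercised roughly like
  this from the flake's root directory. The host name "fanny" is just an example
  taken from the comments above; substitute any attribute of nixosConfigurations.

    nix develop                                    # sops/age/mdbook/microvm tooling plus the scripts from legacyPackages
    nix run .#run-vm -- fanny --dummy-secrets --no-disko
    nix run .#docs                                 # open the rendered mdbook documentation
*/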