13 Commits

Author SHA1 Message Date
ahtlon
d4ef6381a0 i don't know if this should be used
Since only passing through the ssh dir doesn't work, the host would be polluted by all the other hosts writing to /etc
2025-01-18 17:39:28 +01:00
ahtlon
c416f27c81 add note to docs 2025-01-18 16:35:34 +01:00
ahtlon
0d61107515 Try constant test keys 2025-01-18 16:34:24 +01:00
d8d910f5fd [uptimekuma] mv from fanny to hetzner server
After thinking about it, it makes no sense to have status/alerting
running on fanny: as soon as fanny fails, we won't get any alerts anymore.
That's why I think having it run on the Hetzner server, which is
quite stable, makes sense
2025-01-17 14:19:38 +01:00
a4f6b77e30 [fanny] deploy uptimekuma
2025-01-17 14:00:41 +01:00
6aa6f2e171 [uptimekuma] set redirects 2025-01-17 13:59:54 +01:00
d9bb933891 [uptimekuma] init 2025-01-17 13:59:35 +01:00
168d45ed8a [vpn] set mtu 1340
2025-01-17 00:29:11 +01:00
2f477d3566 [fanny] undo proxy settings
2025-01-17 00:19:23 +01:00
b40cb40b01 [fanny] try fix incomplete file transfer
2025-01-16 19:30:49 +01:00
b15b2ae789 [fanny] disable proxy_buffer
2025-01-16 16:36:38 +01:00
c7b02b9366 [vpn] disable proxy_buffer
The url http://10.100.0.101:80/css/variables.css only returns half the file;
hopefully this fixes it
2025-01-16 16:26:23 +01:00
c78eb9cbc1 [fanny][vpn] open port 80, enable nginx
2025-01-16 14:24:19 +01:00
8 changed files with 107 additions and 13 deletions

View File

@@ -44,6 +44,9 @@ sudo mkdir -p /var/lib/microvms/durruti/{var,etc}
 # alternatively u can run the vm in interactive mode (maybe stop the microvm@durruti.service first)
 microvm -r durruti
+#if you get an error like "Error booting VM: VmBoot(DeviceManager(CreateVirtioFs(VhostUserConnect)))", try starting the virtio service manually
+sudo systemctl start microvm-virtiofsd@{host}.service
 # after u made changes to the microvm update and restart the vm
 microvm -uR durruti

View File

@@ -56,11 +56,11 @@ let
       socket = "store.socket";
     }
     {
-      source = "/var/lib/microvms/${hostName}/etc";
+      source = "/var/lib/microvms/test/etc/";
       mountPoint = "/etc";
-      tag = "etc";
+      tag = "etcssh";
       proto = "virtiofs";
-      socket = "etc.socket";
+      socket = "etcssh.socket";
     }
     {
       source = "/var/lib/microvms/${hostName}/var";
@@ -114,6 +114,15 @@ in
     ];
   };
+  lucia = nixosSystem {
+    system = "aarch64-linux";
+    specialArgs.inputs = inputs;
+    modules = defaultModules ++ [
+      ./lucia/configuration.nix
+      ./lucia/hardware_configuration.nix
+    ];
+  };
   fanny = nixosSystem {
     system = "x86_64-linux";
     specialArgs.inputs = inputsMod;
@@ -152,12 +161,12 @@ in
     ];
   };
-  lucia = nixosSystem {
-    system = "aarch64-linux";
+  uptimekuma = nixosSystem {
+    system = "x86_64-linux";
     specialArgs.inputs = inputs;
-    modules = defaultModules ++ [
-      ./lucia/configuration.nix
-      ./lucia/hardware_configuration.nix
+    specialArgs.self = self;
+    modules = makeMicroVM "uptimekuma" "10.0.0.12" "D0:E5:CA:F0:D7:E8" [
+      ./uptimekuma/configuration.nix
     ];
   };
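
The message of the top commit refers to the alternative of sharing only the per-host ssh directory into the guest instead of all of /etc. Using the same share schema as the list above, that variant would look roughly like this (a sketch of the approach the commit says doesn't work on its own, not part of this changeset; the tag and socket names are illustrative):

  {
    source = "/var/lib/microvms/${hostName}/etc/ssh";  # per-host dir on the host side
    mountPoint = "/etc/ssh";                           # only the ssh state is shared into the guest
    tag = "ssh";
    proto = "virtiofs";
    socket = "ssh.socket";
  }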

View File

@@ -8,6 +8,15 @@
       { addr = "0.0.0.0"; port = 9000; }
     ];
     root = "${self.packages.x86_64-linux.docs}/share/doc";
+    extraConfig = ''
+      proxy_buffering off;
+      proxy_cache off;
+      proxy_http_version 1.1;
+      proxy_set_header Host $host;
+      proxy_set_header X-Real-IP $remote_addr;
+      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+      proxy_set_header X-Forwarded-Proto $scheme;
+    '';
   };
 };
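
These are the directives the "disable proxy_buffer" commits are about. Stripped down to the buffering part, a proxied NixOS vhost with buffering turned off looks roughly like this (a minimal sketch; the vhost name and upstream address are made up):

  services.nginx.virtualHosts."example.internal" = {
    locations."/" = {
      proxyPass = "http://10.0.0.99:9000";
      extraConfig = ''
        proxy_buffering off;  # stream the upstream response instead of buffering it
        proxy_cache off;      # never serve a cached (possibly truncated) copy
      '';
    };
  };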

View File

@@ -36,7 +36,21 @@ in
   services.nginx.virtualHosts."docs.malobeo.org" = {
     forceSSL = true;
     enableACME= true;
-    locations."/".proxyPass = "http://10.0.0.10";
+    locations."/" = {
+      proxyPass = "http://10.0.0.10";
+      extraConfig = ''
+      '';
+    };
+  };
+  services.nginx.virtualHosts."status.malobeo.org" = {
+    forceSSL = true;
+    enableACME= true;
+    locations."/" = {
+      proxyPass = "http://10.0.0.12";
+      extraConfig = ''
+      '';
+    };
   };
   services.nginx.virtualHosts."tasklist.malobeo.org" = {

View File

@@ -55,9 +55,21 @@ in
   services.malobeo.microvm.enableHostBridge = true;
   services.malobeo.microvm.deployHosts = [ "infradocs" ];
-  services.nginx.virtualHosts."docs.malobeo.org" = {
-    locations."/".proxyPass = "http://10.0.0.11:9000";
+  networking = {
+    firewall = {
+      allowedTCPPorts = [ 80 ];
+    };
+  };
+  services.nginx = {
+    enable = true;
+    virtualHosts."docs.malobeo.org" = {
+      locations."/" = {
+        proxyPass = "http://10.0.0.11:9000";
+        extraConfig = ''
+        '';
+      };
+    };
   };
   services.tor = {

View File

@@ -69,6 +69,7 @@ in
   networking.wg-quick = {
     interfaces = {
       malovpn = {
+        mtu = 1340; #seems to be necessary to proxypass nginx traffic through vpn
         address = myPeer.address;
         autostart = cfg.autostart;
         listenPort = mkIf (myPeer.role == "server") myPeer.listenPort;
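
The value 1340 is not derived anywhere in the repo. For orientation (this reasoning is an addition, not the author's): the usual WireGuard interface MTU is the link MTU minus the tunnel overhead, and 1340 simply stays well below that:

  # 1500 (ethernet) - 80 (outer IPv6 + UDP + WireGuard headers) = 1420, what wg-quick commonly picks
  # 1500 (ethernet) - 60 (outer IPv4 + UDP + WireGuard headers) = 1440
  # 1340 leaves extra headroom, e.g. for PPPoE or another nested tunnel on the path
  mtu = 1340;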

View File

@@ -0,0 +1,37 @@
{ config, lib, pkgs, inputs, ... }:
with lib;
{
  networking = {
    hostName = mkDefault "uptimekuma";
    useDHCP = false;
    nameservers = [ "1.1.1.1" ];
  };
  imports = [
    ../modules/malobeo_user.nix
    ../modules/sshd.nix
  ];
  networking.firewall.allowedTCPPorts = [ 80 ];
  services.nginx = {
    enable = true;
    virtualHosts."status.malobeo.org" = {
      locations."/" = {
        proxyPass = "http://127.0.0.1:3001";
        extraConfig = ''
        '';
      };
    };
  };
  services.uptime-kuma = {
    enable = true;
  };
  system.stateVersion = "22.11"; # Did you read the comment?
}
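
The proxyPass above relies on uptime-kuma's default listen port. To pin it explicitly, the NixOS module exposes the daemon's environment through its settings option; roughly like this (an untested sketch, assuming that option exists on the nixpkgs revision in use):

  services.uptime-kuma = {
    enable = true;
    settings = {
      HOST = "127.0.0.1";  # only reachable through the local nginx vhost
      PORT = "3001";       # the default, made explicit to match the proxyPass above
    };
  };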

View File

@@ -12,6 +12,7 @@ with lib;
     nameservers = [ "1.1.1.1" ];
     firewall = {
       allowedUDPPorts = [ 51821 ];
+      allowedTCPPorts = [ 80 ];
     };
   };
@@ -27,8 +28,16 @@ with lib;
     privateKeyFile = config.sops.secrets.wg_private.path;
   };
-  services.nginx.virtualHosts."docs.malobeo.org" = {
-    locations."/".proxyPass = "http://10.100.0.101";
+  services.nginx = {
+    enable = true;
+    virtualHosts."docs.malobeo.org" = {
+      locations."/" = {
+        proxyPass = "http://10.100.0.101";
+        extraConfig = ''
+        '';
+      };
+    };
   };
   system.stateVersion = "22.11"; # Did you read the comment?