Compare commits

..

7 commits

32 changed files with 1094 additions and 578 deletions

View file

@ -3,6 +3,21 @@
zones = {
"gmem.ca" =
{
"_minecraft._tcp.mc" = {
srv.data = [
{
port = 25565;
priority = 10;
weight = 5;
target = "mc-real.gmem.ca";
}
];
};
"mc".cname = {
ttl = 0;
data = "e9195a3e-6f24-4cdb-be6d-237199d9a258.cfargotunnel.com";
};
"grafana" = {
a.data = ["91.107.206.145"];
aaaa.data = ["2a01:4f8:c012:5ec6::"];
@ -89,6 +104,9 @@
a.data = ["100.116.48.47"];
aaaa.data = ["fd7a:115c:a1e0:ab12:4843:cd96:6274:302f"];
};
"plex" = {
cname.data = "vancouver.gmem.ca";
};
"" = {
aaaa.data = ["100::"];
@ -147,6 +165,7 @@
"ntfy"
"metube"
"search"
"red"
] (name: {cname.data = "cluster.gmem.ca";})
// lib.attrsets.genAttrs [
# Externally hosted applications with Tunnels
@ -161,6 +180,8 @@
"tokyo"
"ci"
"paste"
"e6"
"minecraft-invites"
] (name: {
cname = {
ttl = 0;

View file

@ -10,6 +10,8 @@
"tokyo"
"nitter"
"paste"
"e6"
"minecraft-invites"
] (name: {
name = name + ".gmem.ca";
content = "newcluster.gmem.ca";
@ -41,13 +43,16 @@
"metube"
"search"
"paste"
"e6"
"red"
"minecraft-invites"
] (name: {
name = name + ".gmem.ca";
content = "homelab.gmem.ca";
});
in {
data = {
"xxxxxx" = home;
"xxxxxx" = tailscale;
"bcee89" = home;
"74c6db" = tailscale;
};
}

View file

@ -94,6 +94,26 @@
"type": "github"
}
},
"emacs-overlay": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs_3",
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1717664893,
"narHash": "sha256-k79hmHv7Q1/FZSqBzNqmLAU6WGICKPFN6QcCX0QM8Og=",
"owner": "nix-community",
"repo": "emacs-overlay",
"rev": "28779a7abf781d387806f2567b578af6fd165705",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "emacs-overlay",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
@ -128,11 +148,11 @@
},
"flake-compat_3": {
"locked": {
"lastModified": 1688025799,
"narHash": "sha256-ktpB4dRtnksm9F5WawoIkEneh1nrEvuxb5lJFt1iOyw=",
"lastModified": 1717312683,
"narHash": "sha256-FrlieJH50AuvagamEvWMIE6D2OAnERuDboFDYAED/dE=",
"owner": "nix-community",
"repo": "flake-compat",
"rev": "8bf105319d44f6b9f0d764efa4fdef9f1cc9ba1c",
"rev": "38fd3954cf65ce6faf3d0d45cd26059e059f07ea",
"type": "github"
},
"original": {
@ -180,7 +200,7 @@
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
"systems": "systems"
},
"locked": {
"lastModified": 1710146030,
@ -198,7 +218,7 @@
},
"flake-utils_3": {
"inputs": {
"systems": "systems_4"
"systems": "systems_3"
},
"locked": {
"lastModified": 1710146030,
@ -215,6 +235,24 @@
}
},
"flake-utils_4": {
"inputs": {
"systems": "systems_5"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_5": {
"locked": {
"lastModified": 1634851050,
"narHash": "sha256-N83GlSGPJJdcqhUxSCS/WwW5pksYf3VP1M13cDRTSVA=",
@ -272,11 +310,11 @@
]
},
"locked": {
"lastModified": 1715930644,
"narHash": "sha256-W9pyM3/vePxrffHtzlJI6lDS3seANQ+Nqp+i58O46LI=",
"lastModified": 1717525419,
"narHash": "sha256-5z2422pzWnPXHgq2ms8lcCfttM0dz+hg+x1pCcNkAws=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "e3ad5108f54177e6520535768ddbf1e6af54b59d",
"rev": "a7117efb3725e6197dd95424136f79147aa35e5b",
"type": "github"
},
"original": {
@ -288,16 +326,16 @@
"kubenix": {
"inputs": {
"flake-compat": "flake-compat",
"nixpkgs": "nixpkgs_3",
"systems": "systems",
"nixpkgs": "nixpkgs_4",
"systems": "systems_2",
"treefmt": "treefmt"
},
"locked": {
"lastModified": 1715211269,
"narHash": "sha256-bO1n41QjfdFNoEih0csMe/MUB42DdOuwlT+6LGpUCSc=",
"lastModified": 1717524369,
"narHash": "sha256-OR0IaHPh6dHrpwTJJdq9IMvJyY6/OQWmS4FEk38Qlm4=",
"owner": "hall",
"repo": "kubenix",
"rev": "060f4757292e1e7172cc9ebcb16f38d89cb707ab",
"rev": "b5dc95c847893857f02579118f7dfb37b580746e",
"type": "github"
},
"original": {
@ -308,15 +346,15 @@
},
"lib-aggregate": {
"inputs": {
"flake-utils": "flake-utils_3",
"flake-utils": "flake-utils_4",
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1715515815,
"narHash": "sha256-yaLScMHNFCH6SbB0HSA/8DWDgK0PyOhCXoFTdHlWkhk=",
"lastModified": 1717330178,
"narHash": "sha256-rRZjmC3xcPpHTJHnEy3T99O86Ecjao5YhakzaoNiRcs=",
"owner": "nix-community",
"repo": "lib-aggregate",
"rev": "09883ca828e8cfaacdb09e29190a7b84ad1d9925",
"rev": "64d43e2bbc6eab8d1cbdfba96d90a71e15a847d7",
"type": "github"
},
"original": {
@ -344,7 +382,7 @@
},
"lix-module": {
"inputs": {
"flake-utils": "flake-utils_2",
"flake-utils": "flake-utils_3",
"flakey-profile": "flakey-profile",
"lix": [
"lix"
@ -354,11 +392,11 @@
]
},
"locked": {
"lastModified": 1715885250,
"narHash": "sha256-IUFYAl3158Ig5vySnRBHoPReb2/S97bjodCo6FhzJv4=",
"lastModified": 1717647344,
"narHash": "sha256-m8XYt8NU2T4gvkien7H7LFGXHhSA5z4tHOeuXQ3DJi4=",
"ref": "refs/heads/main",
"rev": "53d713eb486f21d653af3ef3528e9a19ecfc45e5",
"revCount": 81,
"rev": "4e25f1ab68f2270f9cff59216056c21073db0164",
"revCount": 87,
"type": "git",
"url": "https://git.lix.systems/lix-project/nixos-module"
},
@ -371,7 +409,7 @@
"inputs": {
"flake-parts": "flake-parts",
"nix-github-actions": "nix-github-actions",
"nixpkgs": "nixpkgs_6",
"nixpkgs": "nixpkgs_7",
"treefmt-nix": "treefmt-nix_2"
},
"locked": {
@ -412,7 +450,7 @@
},
"nixinate": {
"inputs": {
"nixpkgs": "nixpkgs_4"
"nixpkgs": "nixpkgs_5"
},
"locked": {
"lastModified": 1708891350,
@ -449,7 +487,7 @@
"nixpkgs": [
"nixpkgs"
],
"systems": "systems_3",
"systems": "systems_4",
"treefmt-nix": "treefmt-nix"
},
"locked": {
@ -474,11 +512,11 @@
]
},
"locked": {
"lastModified": 1713783234,
"narHash": "sha256-3yh0nqI1avYUmmtqqTW3EVfwaLE+9ytRWxsA5aWtmyI=",
"lastModified": 1716210724,
"narHash": "sha256-iqQa3omRcHGpWb1ds75jS9ruA5R39FTmAkeR3J+ve1w=",
"owner": "nix-community",
"repo": "nixos-generators",
"rev": "722b512eb7e6915882f39fff0e4c9dd44f42b77e",
"rev": "d14b286322c7f4f897ca4b1726ce38cb68596c94",
"type": "github"
},
"original": {
@ -489,11 +527,11 @@
},
"nixos-hardware": {
"locked": {
"lastModified": 1716034089,
"narHash": "sha256-QBfab6V4TeQ6Y4NiXVrEATdQuhCNFNaXt/L1K/Zw+zc=",
"lastModified": 1717574423,
"narHash": "sha256-cz3P5MZffAHwL2IQaNzsqUBsJS+u0J/AAwArHMAcCa0=",
"owner": "NixOS",
"repo": "nixos-hardware",
"rev": "b55712de78725c8fcde422ee0a0fe682046e73c3",
"rev": "d6c6cf6f5fead4057d8fb2d5f30aa8ac1727f177",
"type": "github"
},
"original": {
@ -521,11 +559,11 @@
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1715474941,
"narHash": "sha256-CNCqCGOHdxuiVnVkhTpp2WcqSSmSfeQjubhDOcgwGjU=",
"lastModified": 1717289404,
"narHash": "sha256-4q6ZO3BqHgdd3Aacb/xiQXB4g9TQKpQg/praTpD9vbI=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "58e03b95f65dfdca21979a081aa62db0eed6b1d8",
"rev": "e090cb30ae82f4b4461aafdb808847c6c97b08c2",
"type": "github"
},
"original": {
@ -534,6 +572,22 @@
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1717530100,
"narHash": "sha256-b4Dn+PnrZoVZ/BoR9JN2fTxXxplJrAsdSUIePf4Cacs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "a2e1d0414259a144ebdc048408a807e69e0565af",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-wayland": {
"inputs": {
"flake-compat": "flake-compat_3",
@ -544,11 +598,11 @@
]
},
"locked": {
"lastModified": 1716052422,
"narHash": "sha256-9zObaIzZ3OnW4nMdNzMmrjUrGhqhAZhn1VQnxWUlKts=",
"lastModified": 1717669106,
"narHash": "sha256-C7jLK3KgTbGBQcpRsu1qivSoSfkp7PaWI+tLfo9qHHY=",
"owner": "nix-community",
"repo": "nixpkgs-wayland",
"rev": "0c6afa4c3c068730a90ce20762bf0fdfac23e64b",
"rev": "27f970b56d7de3b7214b6017cec7f149656448a1",
"type": "github"
},
"original": {
@ -574,6 +628,22 @@
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1717196966,
"narHash": "sha256-yZKhxVIKd2lsbOqYd5iDoUIwsRZFqE87smE2Vzf6Ck0=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "57610d2f8f0937f39dbd72251e9614b1561942d8",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_4": {
"locked": {
"lastModified": 1686488075,
"narHash": "sha256-2otSBt2hbeD+5yY25NF3RhWx7l5SDt1aeU3cJ/9My4M=",
@ -589,7 +659,7 @@
"type": "github"
}
},
"nixpkgs_4": {
"nixpkgs_5": {
"locked": {
"lastModified": 1653060744,
"narHash": "sha256-kfRusllRumpt33J1hPV+CeCCylCXEU7e0gn2/cIM7cY=",
@ -605,13 +675,13 @@
"type": "github"
}
},
"nixpkgs_5": {
"nixpkgs_6": {
"locked": {
"lastModified": 1715996989,
"narHash": "sha256-ObD9YSelkwCAylEXJHcNjrn3hLOfIVScB1tPz9zeDN8=",
"lastModified": 1717459389,
"narHash": "sha256-I8/plBsua4/NZ5bKgj+z7/ThiWuud1YFwLsn1QQ5PgE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "63d3e5d82edf5a138e7d0872231cc23ed4e740fd",
"rev": "3b01abcc24846ae49957b30f4345bab4b3f1d14b",
"type": "github"
},
"original": {
@ -621,7 +691,7 @@
"type": "github"
}
},
"nixpkgs_6": {
"nixpkgs_7": {
"locked": {
"lastModified": 1715037484,
"narHash": "sha256-OUt8xQFmBU96Hmm4T9tOWTu4oCswCzoVl+pxSq/kiFc=",
@ -637,7 +707,7 @@
"type": "github"
}
},
"nixpkgs_7": {
"nixpkgs_8": {
"locked": {
"lastModified": 1636823747,
"narHash": "sha256-oWo1nElRAOZqEf90Yek2ixdHyjD+gqtS/pAgwaQ9UhQ=",
@ -656,6 +726,7 @@
"inputs": {
"agenix": "agenix",
"alertmanager-ntfy": "alertmanager-ntfy",
"emacs-overlay": "emacs-overlay",
"home-manager": "home-manager_2",
"kubenix": "kubenix",
"lix": "lix",
@ -664,7 +735,7 @@
"nixos-dns": "nixos-dns",
"nixos-generators": "nixos-generators",
"nixos-hardware": "nixos-hardware",
"nixpkgs": "nixpkgs_5",
"nixpkgs": "nixpkgs_6",
"nixpkgs-wayland": "nixpkgs-wayland",
"terranix": "terranix"
}
@ -679,11 +750,26 @@
"type": "github"
},
"original": {
"id": "systems",
"type": "indirect"
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"id": "systems",
"type": "indirect"
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
@ -698,7 +784,7 @@
"type": "github"
}
},
"systems_3": {
"systems_4": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
@ -712,7 +798,7 @@
"type": "indirect"
}
},
"systems_4": {
"systems_5": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
@ -731,8 +817,8 @@
"inputs": {
"bats-assert": "bats-assert",
"bats-support": "bats-support",
"flake-utils": "flake-utils_4",
"nixpkgs": "nixpkgs_7",
"flake-utils": "flake-utils_5",
"nixpkgs": "nixpkgs_8",
"terranix-examples": "terranix-examples"
},
"locked": {

View file

@ -31,6 +31,7 @@
inputs.lix.follows = "lix";
inputs.nixpkgs.follows = "nixpkgs";
};
emacs-overlay.url = "github:nix-community/emacs-overlay";
};
outputs = {
@ -47,9 +48,14 @@
kubenix,
nixos-dns,
nixos-hardware,
emacs-overlay,
...
} @ inputs: let
pkgs = nixpkgs.legacyPackages.x86_64-linux;
system = "x86_64-linux";
pkgs = import nixpkgs {
inherit system;
overlays = [emacs-overlay.overlays.default];
};
tf = terranix.lib.terranixConfiguration {
system = "x86_64-linux";
modules = [./terraform/main.nix];
@ -96,6 +102,7 @@
kubernetes-helm
nil
talosctl
dogdns
(octodns.withProviders (ps: [
octodns-providers.bind
octodns-cloudflare
@ -412,6 +419,22 @@
}
];
};
minecraft-server = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
agenix.nixosModules.default
(import ./nix/minecraft-server/configuration.nix)
{
_module.args.nixinate = {
host = "192.168.50.13";
sshUser = "root";
buildOn = "remote";
substituteOnTarget = true;
hermetic = false;
};
}
];
};
};
};
}

View file

@ -93,25 +93,31 @@ data:
credentials-file: /etc/cloudflared/creds/credentials.json
metrics: 0.0.0.0:2000
no-autoupdate: true
warp-routing:
enabled: true
ingress:
- hostname: photos.gmem.ca
service: http://immich-server.immich.svc.cluster.local:3001
service: https://homelab.gmem.ca
- hostname: pw.gmem.ca
service: http://vaultwarden.vaultwarden.svc.cluster.local:80
service: https://homelab.gmem.ca
- hostname: authentik.gmem.ca
service: http://authentik-server.authentik.svc.cluster.local:80
service: https://homelab.gmem.ca
- hostname: nitter.gmem.ca
service: http://nitter.nitter.svc.cluster.local:8081
service: https://homelab.gmem.ca
- hostname: git.gmem.ca
service: http://192.168.50.229
service: https://homelab.gmem.ca
- hostname: proxmox.gmem.ca
service: http://proxmox.endpoints.svc.cluster.local:8006
service: https://homelab.gmem.ca
- hostname: tokyo.gmem.ca
service: http://tokyo.endpoints.svc.cluster.local:8000
service: https://homelab.gmem.ca
- hostname: ibiza.gmem.ca
service: http://ibiza.endpoints.svc.cluster.local:8000
service: https://homelab.gmem.ca
- hostname: chat.gmem.ca
service: tcp://192.168.50.45:443
service: https://homelab.gmem.ca
- hostname: paste.gmem.ca
service: http://tclip.tclip.svc.cluster.local:8080
service: https://homelab.gmem.ca
- hostname: e6.gmem.ca
service: https://homelab.gmem.ca
- hostname: minecraft-invites.gmem.ca
service: https://homelab.gmem.ca
- service: http_status:404

View file

@ -1,5 +1,11 @@
let
endpoints = {
"git" = {
location = "192.168.50.229";
host = "git.gmem.ca";
port = 443;
protocol = "HTTPS";
};
"proxmox" = {
location = "192.168.50.3";
host = "proxmox.gmem.ca";

View file

@ -69,37 +69,37 @@ in {
name = "postgres-soju";
key = "password";
};
env.PGUSER.valueFrom.secretKeyRef = {
name = "postgres-soju";
key = "user";
};
env.PGDATABASE.valueFrom.secretKeyRef = {
name = "postgres-soju";
key = "dbname";
env.PGUSER.valueFrom.secretKeyRef = {
name = "postgres-soju";
key = "user";
};
env.PGDATABASE.valueFrom.secretKeyRef = {
name = "postgres-soju";
key = "dbname";
};
};
};
};
};
};
};
};
kubernetes.resources.deployments.gamja = {
metadata.namespace = "irc";
metadata.namespace = "irc";
spec = {
selector.matchLabels.app = "gamja";
template = {
metadata.labels.app = "gamja";
spec = {
containers = {
gamja = {
image = gamjaImage;
imagePullPolicy = "Always";
ports.http.containerPort = 80;
selector.matchLabels.app = "gamja";
template = {
metadata.labels.app = "gamja";
spec = {
containers = {
gamja = {
image = gamjaImage;
imagePullPolicy = "Always";
ports.http.containerPort = 80;
};
};
};
};
};
};
};
kubernetes.resources.ingresses.irc = {
@ -144,14 +144,15 @@ in {
kubernetes.resources.configMaps.soju = {
metadata.namespace = "irc";
data.config = ''
listen ircs://
listen unix+admin:///app/admin
listen ws+insecure://
hostname irc.gmem.ca
title irc.gmem.ca
db postgres "dbname=soju"
message-store db
tls /ssl/tls.crt /ssl/tls.key
'';
listen ircs://
listen unix+admin:///app/admin
listen ws+insecure://
listen http+prometheus://localhost:9090
hostname irc.gmem.ca
title irc.gmem.ca
db postgres "dbname=soju"
message-store db
tls /ssl/tls.crt /ssl/tls.key
'';
};
}

View file

@ -28,5 +28,7 @@
# (import ./snikket.nix)
(import ./metube.nix)
(import ./searxng.nix)
(import ./redlib.nix)
(import ./minecraft-invites.nix)
];
}

View file

@ -60,5 +60,4 @@ in {
];
};
};
}

View file

@ -0,0 +1,125 @@
let
appName = "whitelistmanager";
appImage = "git.gmem.ca/arch/whitelistmanager";
frontendImage = "git.gmem.ca/arch/whitelistmanager-frontend";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.deployments.whitelistmanager = {
metadata.namespace = "minecraft-invites";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
whitelistmanager = {
image = appImage;
envFrom = [{secretRef.name = "whitelistmanager";}];
resources = {
requests = {
cpu = "1m";
memory = "256Mi";
};
limits = {
cpu = "1";
memory = "512Mi";
};
};
ports.http.containerPort = 8080;
};
};
};
};
};
};
kubernetes.resources.deployments.whitelistmanager-frontend = {
metadata.namespace = "minecraft-invites";
spec = {
selector.matchLabels.app = appName + "-frontend";
template = {
metadata.labels.app = appName + "-frontend";
spec = {
containers = {
whitelistmanager = {
image = frontendImage;
resources = {
requests = {
cpu = "1m";
memory = "256Mi";
};
limits = {
cpu = "1";
memory = "512Mi";
};
};
ports.http.containerPort = 3000;
};
};
};
};
};
};
kubernetes.resources.services.whitelistmanager = {
metadata.namespace = "minecraft-invites";
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
};
};
};
kubernetes.resources.services.whitelistmanager-frontend = {
metadata.namespace = "minecraft-invites";
metadata.labels.app = appName + "-frontend";
spec = {
selector.app = appName + "-frontend";
ports.http = {
port = 3000;
targetPort = 3000;
};
};
};
kubernetes.resources.ingresses.whitelistmanager = {
metadata.namespace = "minecraft-invites";
metadata.annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
spec = {
tls = [
{
hosts = ["minecraft-invites.gmem.ca"];
}
];
rules = [
{
host = "minecraft-invites.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = "whitelistmanager-frontend";
port.number = 3000;
};
}
{
path = "/api";
pathType = "Prefix";
backend.service = {
name = "whitelistmanager";
port.number = 8080;
};
}
];
}
];
};
};
}

View file

@ -2,102 +2,104 @@ let
appName = "miniflux";
appImage = "docker.io/miniflux/miniflux";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.deployments.miniflux = {
metadata.namespace = "miniflux";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
miniflux = {
image = appImage;
envFrom = [{secretRef.name = "miniflux";}
{configMapRef.name = "miniflux";}];
resources = {
requests = {
cpu = "1m";
memory = "256Mi";
};
limits = {
cpu = "1";
memory = "512Mi";
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.deployments.miniflux = {
metadata.namespace = "miniflux";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
miniflux = {
image = appImage;
envFrom = [
{secretRef.name = "miniflux";}
{configMapRef.name = "miniflux";}
];
resources = {
requests = {
cpu = "1m";
memory = "256Mi";
};
limits = {
cpu = "1";
memory = "512Mi";
};
};
ports.http.containerPort = 8080;
};
ports.http.containerPort = 8080;
};
};
};
};
};
};
kubernetes.resources.services.miniflux = {
metadata.namespace = "miniflux";
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
kubernetes.resources.services.miniflux = {
metadata.namespace = "miniflux";
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
};
};
};
};
kubernetes.resources.ingresses.miniflux = {
metadata.namespace = "miniflux";
metadata.annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
kubernetes.resources.ingresses.miniflux = {
metadata.namespace = "miniflux";
metadata.annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
spec = {
tls = [
{
hosts = ["rss.gmem.ca"];
}
];
rules = [
{
host = "rss.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = "miniflux";
port.number = 8080;
};
}
];
}
];
};
};
spec = {
tls = [
{
hosts = ["rss.gmem.ca"];
}
];
rules = [
{
host = "rss.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = "miniflux";
port.number = 8080;
};
}
];
}
];
};
};
kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.miniflux = {
metadata.namespace = "miniflux";
spec = {
selector.matchLabels.app = appName;
endpoints = [
{
port = "http";
interval = "60s";
}
];
kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.miniflux = {
metadata.namespace = "miniflux";
spec = {
selector.matchLabels.app = appName;
endpoints = [
{
port = "http";
interval = "60s";
}
];
};
};
};
kubernetes.resources.configMaps.miniflux = {
metadata.namespace = "miniflux";
data = {
CLEANUP_ARCHIVE_UNREAD_DAYS = "60";
METRICS_COLLECTOR = "1";
METRICS_ALLOWED_NETWORKS = "0.0.0.0/0";
BASE_URL = "https://rss.gmem.ca/";
kubernetes.resources.configMaps.miniflux = {
metadata.namespace = "miniflux";
data = {
CLEANUP_ARCHIVE_UNREAD_DAYS = "60";
METRICS_COLLECTOR = "1";
METRICS_ALLOWED_NETWORKS = "0.0.0.0/0";
BASE_URL = "https://rss.gmem.ca/";
};
};
};
}
}

View file

@ -32,6 +32,7 @@
];
ingressClassResource.default = true;
publishService.enabled = true;
service.type = "NodePort";
service.externalTrafficPolicy = "Local";
hostNetwork = true;
extraArgs.default-ssl-certificate = "cert-manager/gmem-ca-wildcard";

View file

@ -2,33 +2,36 @@ let
appName = "nitter-bot";
appImage = "git.gmem.ca/arch/nitter-bot:latest";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.statefulSets.nitter-bot = {
metadata.namespace = "nitter";
spec = {
selector.matchLabels.app = appName;
serviceName = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
nitter-bot = {
image = appImage;
envFrom = [{secretRef.name = "nitter-bot";}
{configMapRef.name = "nitter-bot";}];
resources = {
requests = {
cpu = "1m";
memory = "32Mi";
};
limits = {
cpu = "1";
memory = "128Mi";
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.statefulSets.nitter-bot = {
metadata.namespace = "nitter";
spec = {
selector.matchLabels.app = appName;
serviceName = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
nitter-bot = {
image = appImage;
envFrom = [
{secretRef.name = "nitter-bot";}
{configMapRef.name = "nitter-bot";}
];
resources = {
requests = {
cpu = "1m";
memory = "32Mi";
};
limits = {
cpu = "1";
memory = "128Mi";
};
};
};
};
@ -36,13 +39,12 @@ in
};
};
};
};
kubernetes.resources.configMaps.nitter-bot = {
metadata.namespace = "nitter";
data = {
NITTER_URL = "http://nitter:8080";
NITTER_EXTERNAL_URL = "https://nitter.gmem.ca";
kubernetes.resources.configMaps.nitter-bot = {
metadata.namespace = "nitter";
data = {
NITTER_URL = "http://nitter:8080";
NITTER_EXTERNAL_URL = "https://nitter.gmem.ca";
};
};
};
}
}

View file

@ -25,53 +25,53 @@ in
kubernetes.resources.deployments.nitter = {
metadata.namespace = "nitter";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "nitter";
accounts.secret.secretName = "nitter";
};
containers = {
nitter = {
image = nitterImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/src/nitter.conf";
subPath = "nitter.conf";
}
{
name = "accounts";
mountPath = "/src/guest_accounts.json";
subPath = "guest_accounts.json";
}
];
ports.http.containerPort = 8080;
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "nitter";
accounts.secret.secretName = "nitter";
};
nitter-ro = {
image = nitterImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/src/nitter.conf";
subPath = "nitter-ro.conf";
}
{
name = "accounts";
mountPath = "/src/guest_accounts.json";
subPath = "guest_accounts.json";
}
];
ports.http.containerPort = 8081;
containers = {
nitter = {
image = nitterImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/src/nitter.conf";
subPath = "nitter.conf";
}
{
name = "accounts";
mountPath = "/src/guest_accounts.json";
subPath = "guest_accounts.json";
}
];
ports.http.containerPort = 8080;
};
nitter-ro = {
image = nitterImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/src/nitter.conf";
subPath = "nitter-ro.conf";
}
{
name = "accounts";
mountPath = "/src/guest_accounts.json";
subPath = "guest_accounts.json";
}
];
ports.http.containerPort = 8081;
};
};
};
};
};
};
};
kubernetes.helm.releases.nitter-redis = {
namespace = "nitter";

View file

@ -25,8 +25,10 @@ in {
containers = {
jellyseerr = {
image = appImage;
envFrom = [{secretRef.name = "jellyseerr";}
{configMapRef.name = "jellyseerr";}];
envFrom = [
{secretRef.name = "jellyseerr";}
{configMapRef.name = "jellyseerr";}
];
volumeMounts = [
{
name = "config";

View file

@ -73,29 +73,29 @@
kubernetes.resources.cronJobs.piped-refresh = {
metadata.namespace = "piped";
spec = {
schedule = "*/30 * * * *";
jobTemplate.spec.template.spec = {
restartPolicy = "Never";
containers.refresh-subscriptions = {
image = "debian:bookworm-slim";
envFrom = [{secretRef.name = "postgres-piped";}];
command = [
"/bin/bash"
"-c"
''
apt update && apt install -y postgresql-client curl
curl -o /dev/null "https://healthchecks.gmem.ca/ping/$HEALTHCHECKS_UUID/start"
export PGPASSWORD=$password &&
export subs=$(psql -U piped -h 192.168.50.236 -qtAX -c 'select id from public.pubsub;') &&
while IFS= read -r line; do
echo "refreshing $line"
curl -k -o /dev/null "http://piped-backend:8080/channel/$line"
done < <(printf '%s' "$subs")
curl -o /dev/null "https://healthchecks.gmem.ca/ping/$HEALTHCHECKS_UUID"
''
];
schedule = "*/30 * * * *";
jobTemplate.spec.template.spec = {
restartPolicy = "Never";
containers.refresh-subscriptions = {
image = "debian:bookworm-slim";
envFrom = [{secretRef.name = "postgres-piped";}];
command = [
"/bin/bash"
"-c"
''
apt update && apt install -y postgresql-client curl
curl -o /dev/null "https://healthchecks.gmem.ca/ping/$HEALTHCHECKS_UUID/start"
export PGPASSWORD=$password &&
export subs=$(psql -U piped -h 192.168.50.236 -qtAX -c 'select id from public.pubsub;') &&
while IFS= read -r line; do
echo "refreshing $line"
curl -k -o /dev/null "http://piped-backend:8080/channel/$line"
done < <(printf '%s' "$subs")
curl -o /dev/null "https://healthchecks.gmem.ca/ping/$HEALTHCHECKS_UUID"
''
];
};
};
};
};
};
}

85 homelab/redlib.nix Normal file
View file

@ -0,0 +1,85 @@
let
appName = "redlib";
appImage = "git.gmem.ca/arch/redlib:latest";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.services.redlib = {
metadata.namespace = "redlib";
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
};
};
};
kubernetes.resources.deployments.redlib = {
metadata.namespace = "redlib";
spec = {
selector.matchLabels.app = appName;
replicas = 2;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "redlib";
};
containers = {
redlib = {
image = appImage;
imagePullPolicy = "Always";
ports.http.containerPort = 8080;
resources = {
requests = {
cpu = "100m";
memory = "64Mi";
};
limits = {
memory = "128Mi";
};
};
};
};
};
};
};
};
kubernetes.resources.ingresses.redlib = {
metadata = {
name = appName;
namespace = "redlib";
annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["red.gmem.ca"];
}
];
rules = [
{
host = "red.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
}
];
}
];
};
};
}

View file

@ -2,58 +2,59 @@ let
appName = "searxng";
appImage = "docker.io/searxng/searxng:latest";
in
{
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.services.searxng = {
metadata.namespace = "searxng";
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
kubernetes.resources.services.searxng = {
metadata.namespace = "searxng";
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
};
};
};
};
kubernetes.resources.deployments.searxng = {
metadata.namespace = "searxng";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "searxng";
};
containers = {
searxng = {
image = appImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/etc/searxng/settings.yml";
subPath = "settings.yml";
}
{
name = "config";
mountPath = "/etc/searxng/limiter.toml";
subPath = "limiter.toml";
}
];
envFrom = [{secretRef.name = "searxng";}];
ports.http.containerPort = 8080;
resources = {
requests = {
cpu = "100m";
memory = "512Mi";
};
limits = {
memory = "1Gi";
kubernetes.resources.deployments.searxng = {
metadata.namespace = "searxng";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "searxng";
};
containers = {
searxng = {
image = appImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/etc/searxng/settings.yml";
subPath = "settings.yml";
}
{
name = "config";
mountPath = "/etc/searxng/limiter.toml";
subPath = "limiter.toml";
}
];
envFrom = [{secretRef.name = "searxng";}];
ports.http.containerPort = 8080;
resources = {
requests = {
cpu = "100m";
memory = "512Mi";
};
limits = {
memory = "1Gi";
};
};
};
};
@ -61,80 +62,80 @@ in
};
};
};
};
kubernetes.resources.configMaps.searxng = {
metadata.namespace = "searxng";
data."settings.yml" = ''
use_default_settings: true
server:
image_proxy: true
http_protocol_version: "1.1"
method: "GET"
ui:
static_use_hash: true
redis:
url: redis://searxng-redis-master:6379/0
general:
instance_name: search.gmem.ca
hostname_replace:
'(.*\.)?youtube\.com$': 'piped.gmem.ca'
'(.*\.)?youtu\.be$': 'piped.gmem.ca'
'(.*\.)?youtube-noocookie\.com$': 'piped.gmem.ca'
'(www\.)?twitter\.com$': 'nitter.gmem.ca'
'(www\.)?x\.com$': 'nitter.gmem.ca'
'';
data."limiter.toml" = ''
# This configuration file updates the default configuration file
# See https://github.com/searxng/searxng/blob/master/searx/botdetection/limiter.toml
kubernetes.resources.configMaps.searxng = {
metadata.namespace = "searxng";
data."settings.yml" = ''
use_default_settings: true
server:
image_proxy: true
http_protocol_version: "1.1"
method: "GET"
ui:
static_use_hash: true
redis:
url: redis://searxng-redis-master:6379/0
general:
instance_name: search.gmem.ca
hostname_replace:
'(.*\.)?youtube\.com$': 'piped.gmem.ca'
'(.*\.)?youtu\.be$': 'piped.gmem.ca'
'(.*\.)?youtube-noocookie\.com$': 'piped.gmem.ca'
'(www\.)?twitter\.com$': 'nitter.gmem.ca'
'(www\.)?x\.com$': 'nitter.gmem.ca'
'(.*\.)?reddit\.com$': 'red.gmem.ca'
'';
data."limiter.toml" = ''
# This configuration file updates the default configuration file
# See https://github.com/searxng/searxng/blob/master/searx/botdetection/limiter.toml
[botdetection.ip_limit]
# activate link_token method in the ip_limit method
link_token = true
'';
};
kubernetes.helm.releases.searxng-redis = {
namespace = "searxng";
chart = kubenix.lib.helm.fetch {
repo = "https://charts.bitnami.com/bitnami";
chart = "redis";
version = "18.6.1";
sha256 = "CyvGHc1v1BtbzDx6hbbPah2uWpUhlNIUQowephT6hmM=";
[botdetection.ip_limit]
# activate link_token method in the ip_limit method
link_token = true
'';
};
values = {
auth.enabled = false;
architecture = "standalone";
};
};
kubernetes.resources.ingresses.searxng = {
metadata = {
name = appName;
kubernetes.helm.releases.searxng-redis = {
namespace = "searxng";
annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
chart = kubenix.lib.helm.fetch {
repo = "https://charts.bitnami.com/bitnami";
chart = "redis";
version = "18.6.1";
sha256 = "CyvGHc1v1BtbzDx6hbbPah2uWpUhlNIUQowephT6hmM=";
};
values = {
auth.enabled = false;
architecture = "standalone";
};
};
spec = {
tls = [
{
hosts = ["search.gmem.ca"];
}
];
rules = [
{
host = "search.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
}
];
}
];
kubernetes.resources.ingresses.searxng = {
metadata = {
name = appName;
namespace = "searxng";
annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["search.gmem.ca"];
}
];
rules = [
{
host = "search.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
}
];
}
];
};
};
};
}
}

View file

@ -3,147 +3,148 @@ let
snikketImage = "git.gmem.ca/arch/snikket-server:latest";
snikketPortalImage = "snikket/snikket-web-portal:stable";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.services.snikket = {
metadata.namespace = "snikket";
spec = {
selector.app = appName;
ports.http = {
port = 5280;
targetPort = 5280;
};
};
};
kubernetes.resources.services.snikket-xmpp = {
metadata.namespace = "snikket";
spec = {
type = "NodePort";
selector.app = appName;
ports.http = {
port = 5222;
targetPort = 5222;
nodePort = 5222;
};
};
};
kubernetes.resources.services.snikket-web-portal = {
metadata.namespace = "snikket";
spec = {
selector.app = appName + "-web-portal";
ports.http = {
port = 5765;
targetPort = 5765;
};
};
};
kubernetes.resources.deployments.snikket = {
metadata.namespace = "snikket";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
snikket = {
image = snikketImage;
env.SNIKKET_TWEAK_TURNSERVER.value = "0";
env.SNIKKET_TWEAK_INTERNAL_HTTP_INTERFACE.value = "0.0.0.0";
envFrom = [{configMapRef.name = "snikket";}];
imagePullPolicy = "Always";
volumeMounts = [
{
name = "certs";
mountPath = "/etc/prosody/certs/chat.gmem.ca.crt";
subPath = "tls.crt";
}
{
name = "certs";
mountPath = "/etc/prosody/certs/chat.gmem.ca.key";
subPath = "tls.key";
}
];
ports.http.containerPort = 5280;
};
};
volumes = {
certs.secret.secretName = "chat-gmem-ca";
};
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.services.snikket = {
metadata.namespace = "snikket";
spec = {
selector.app = appName;
ports.http = {
port = 5280;
targetPort = 5280;
};
};
};
};
kubernetes.resources.deployments.snikket-web-portal = {
metadata.namespace = "snikket";
spec = {
selector.matchLabels.app = appName + "-web-portal";
template = {
metadata.labels.app = appName + "-web-portal";
spec = {
containers = {
snikket = {
image = snikketPortalImage;
env.SNIKKET_TWEAK_PORTAL_INTERNAL_HTTP_INTERFACE.value = "0.0.0.0";
env.SNIKKET_WEB_PROSODY_ENDPOINT.value = "http://snikket:5280";
imagePullPolicy = "Always";
ports.http.containerPort = 5765;
};
};
kubernetes.resources.services.snikket-xmpp = {
metadata.namespace = "snikket";
spec = {
type = "NodePort";
selector.app = appName;
ports.http = {
port = 5222;
targetPort = 5222;
nodePort = 5222;
};
};
};
};
kubernetes.resources.ingresses.snikket = {
metadata = {
name = appName;
namespace = "snikket";
annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
kubernetes.resources.services.snikket-web-portal = {
metadata.namespace = "snikket";
spec = {
selector.app = appName + "-web-portal";
ports.http = {
port = 5765;
targetPort = 5765;
};
};
};
spec = {
tls = [
{
hosts = ["chat.gmem.ca"];
}
];
rules = [
{
host = "chat.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName + "-web-portal";
port.name = "http";
kubernetes.resources.deployments.snikket = {
metadata.namespace = "snikket";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
snikket = {
image = snikketImage;
env.SNIKKET_TWEAK_TURNSERVER.value = "0";
env.SNIKKET_TWEAK_INTERNAL_HTTP_INTERFACE.value = "0.0.0.0";
envFrom = [{configMapRef.name = "snikket";}];
imagePullPolicy = "Always";
volumeMounts = [
{
name = "certs";
mountPath = "/etc/prosody/certs/chat.gmem.ca.crt";
subPath = "tls.crt";
}
{
name = "certs";
mountPath = "/etc/prosody/certs/chat.gmem.ca.key";
subPath = "tls.key";
}
];
ports.http.containerPort = 5280;
};
}
]
++ lib.lists.forEach [
# Routes we want to hit Prosody's backend
"/admin_api"
"/invites_api"
"/invites_bootstrap"
"/upload"
"/http-bind"
"/xmpp-websocket"
"/.well-known/host-meta"
"/.well-known/host-meta.json"
] (path: {
path = path;
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
});
}
];
volumes = {
certs.secret.secretName = "chat-gmem-ca";
};
};
};
};
};
};
}
kubernetes.resources.deployments.snikket-web-portal = {
metadata.namespace = "snikket";
spec = {
selector.matchLabels.app = appName + "-web-portal";
template = {
metadata.labels.app = appName + "-web-portal";
spec = {
containers = {
snikket = {
image = snikketPortalImage;
env.SNIKKET_TWEAK_PORTAL_INTERNAL_HTTP_INTERFACE.value = "0.0.0.0";
env.SNIKKET_WEB_PROSODY_ENDPOINT.value = "http://snikket:5280";
imagePullPolicy = "Always";
ports.http.containerPort = 5765;
};
};
};
};
};
};
kubernetes.resources.ingresses.snikket = {
metadata = {
name = appName;
namespace = "snikket";
annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["chat.gmem.ca"];
}
];
rules = [
{
host = "chat.gmem.ca";
http.paths =
[
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName + "-web-portal";
port.name = "http";
};
}
]
++ lib.lists.forEach [
# Routes we want to hit Prosody's backend
"/admin_api"
"/invites_api"
"/invites_bootstrap"
"/upload"
"/http-bind"
"/xmpp-websocket"
"/.well-known/host-meta"
"/.well-known/host-meta.json"
] (path: {
path = path;
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
});
}
];
};
};
}

View file

@ -20,7 +20,7 @@ in
kernelModules = ["kvm-amd" "vfio_pci" "vfio" "vfio_iommu_type1" "kvmfr"];
extraModulePackages = with config.boot.kernelPackages; [
# Until https://github.com/NixOS/nixpkgs/pull/305018 is merged.
(pkgs.linuxPackages_latest.kvmfr.overrideAttrs({ ... }: {
(pkgs.linuxPackages_latest.kvmfr.overrideAttrs ({...}: {
patches = [];
}))
];

View file

@ -11,8 +11,8 @@
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
''${builtins.fetchTarball {
url = "https://github.com/nix-community/disko/archive/master.tar.gz";
sha256 = "1wg3nnh8lrc8q8q4qyk9yynsa24qqj9126h3cy0ijq93mz46i1k7";
url = "https://github.com/nix-community/disko/archive/refs/tags/v1.6.1.tar.gz";
sha256 = "1p9vsml07bm3riw703dv83ihlmgyc11qv882qa6bqzqdgn86y8z4";
}}/module.nix''
./disk-config.nix
];

View file

@ -11,8 +11,8 @@
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
''${builtins.fetchTarball {
url = "https://github.com/nix-community/disko/archive/master.tar.gz";
sha256 = "1wg3nnh8lrc8q8q4qyk9yynsa24qqj9126h3cy0ijq93mz46i1k7";
url = "https://github.com/nix-community/disko/archive/refs/tags/v1.6.1.tar.gz";
sha256 = "1p9vsml07bm3riw703dv83ihlmgyc11qv882qa6bqzqdgn86y8z4";
}}/module.nix''
./disk-config.nix
];

View file

@ -9,23 +9,8 @@
config.allowUnfreePredicate = pkg:
builtins.elem (lib.getName pkg) [
"parsec-bin"
"discord"
];
config.allowUnfree = true;
overlays = let
discordOverlay = self: super: {
discord = super.discord.override {
withVencord = true;
withOpenASAR = true;
};
};
in [
(import (builtins.fetchTarball {
url = "https://github.com/nix-community/emacs-overlay/archive/master.tar.gz";
sha256 = "09rsdkn16al5qsyrl5fjrljw7ff0z5yb9ihskbipcdiffcn11kax";
}))
discordOverlay
];
};
home = {
username = "gsimmer";
@ -130,12 +115,13 @@
atuin
age-plugin-yubikey
rage
discord
vesktop
mangohud
comma
transmission_4-qt
ungoogled-chromium
looking-glass-client
senpai
];
# This value determines the Home Manager release that your

View file

@ -30,7 +30,7 @@
fileSystems."/tmp" = {
device = "tmpfs";
fsType = "tmpfs";
options = [ "size=4G" "mode=777" ]; # mode=755 so only root can write to those files
options = ["size=4G" "mode=777"]; # mode=755 so only root can write to those files
};
swapDevices = [

View file

@ -0,0 +1,84 @@
# Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page, on
# https://search.nixos.org/options and in the NixOS manual (`nixos-help`).
{
config,
lib,
pkgs,
...
}: {
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
];
age.secrets.cloudflared = {
file = ../../secrets/minecraft-server-cloudflared.age;
owner = "cloudflared";
};
nixpkgs.config.allowUnfree = true;
nix = {
settings = {
auto-optimise-store = true;
experimental-features = ["nix-command" "flakes"];
};
};
# Use the systemd-boot EFI boot loader.
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking = {
hostName = "minecraft-server"; # Define your hostname.
useDHCP = true;
firewall = {
enable = true;
allowedUDPPorts = [];
allowedTCPPorts = [22 80 443];
trustedInterfaces = ["enp6s18"];
checkReversePath = "loose";
};
nftables.enable = true;
};
services = {
openssh.enable = true;
minecraft-server = {
enable = true;
openFirewall = true;
eula = true;
#package = pkgs.papermc;
};
bluemap = {
enable = true;
eula = true;
defaultWorld = "${config.services.minecraft-server.dataDir}/world";
host = "mc.gmem.ca";
};
cloudflared = {
enable = true;
tunnels.minecraft = {
credentialsFile = config.age.secrets.cloudflared.path;
default = "http_status:404";
ingress = {
"mc.gmem.ca" = "http://localhost:80";
};
warp-routing.enabled = true;
};
};
# nginx reverse proxy
nginx = {
enable = true;
recommendedGzipSettings = true;
recommendedBrotliSettings = true;
recommendedZstdSettings = true;
recommendedOptimisation = true;
recommendedTlsSettings = true;
recommendedProxySettings = true;
};
qemuGuest.enable = true;
};
system.stateVersion = "23.11"; # Did you read the comment?
}

View file

@ -0,0 +1,32 @@
{
disko.devices = {
disk = {
my-disk = {
device = "/dev/vda";
type = "disk";
content = {
type = "gpt";
partitions = {
ESP = {
type = "EF00";
size = "500M";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
}

View file

@ -0,0 +1,33 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}: {
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
''${builtins.fetchTarball {
url = "https://github.com/nix-community/disko/archive/refs/tags/v1.6.1.tar.gz";
sha256 = "1p9vsml07bm3riw703dv83ihlmgyc11qv882qa6bqzqdgn86y8z4";
}}/module.nix''
./disk-config.nix
];
boot.initrd.availableKernelModules = ["uhci_hcd" "ehci_pci" "ahci" "virtio_pci" "virtio_scsi" "sd_mod" "sr_mod"];
boot.initrd.kernelModules = [];
boot.kernelModules = [];
boot.extraModulePackages = [];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.enp6s18.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
}

View file

@ -301,6 +301,22 @@
recommendedZstdSettings = true;
recommendedOptimisation = true;
recommendedTlsSettings = true;
virtualHosts."plex.gmem.ca" = {
enableACME = true;
forceSSL = true;
acmeRoot = null;
locations."/" = {
extraConfig = ''
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
client_max_body_size 500M;
'';
proxyPass = "http://127.0.0.1:32400/";
};
};
virtualHosts."git.gmem.ca" = {
enableACME = true;
addSSL = true;
@ -551,12 +567,17 @@
security.acme.acceptTerms = true;
security.acme.defaults.email = "acme@gmem.ca";
security.acme.certs."git.gmem.ca" = {
domain = "*.gmem.ca";
domain = "git.gmem.ca";
dnsProvider = "cloudflare";
credentialsFile = config.age.secrets.cloudflare-dns.path;
};
security.acme.certs."docs.gmem.ca" = {
domain = "*.gmem.ca";
domain = "docs.gmem.ca";
dnsProvider = "cloudflare";
credentialsFile = config.age.secrets.cloudflare-dns.path;
};
security.acme.certs."plex.gmem.ca" = {
domain = "plex.gmem.ca";
dnsProvider = "cloudflare";
credentialsFile = config.age.secrets.cloudflare-dns.path;
};

View file

@ -44,7 +44,7 @@
}
{
from = 8000;
to = 8010;
to = 8900;
}
];
allowedUDPPorts = [41641 1935];
@ -81,12 +81,23 @@
openssh.enable = true;
tailscale.enable = true;
owncast = {
enable = true;
enable = false;
port = 8080;
openFirewall = false;
};
mediamtx = {
enable = true;
settings = {
rtmp = false;
pathDefaults.srtPublishPassphrase = "thisisntforyoutouse";
paths = {
eufuria = {
srtPublishPassphrase = "beckiiscute";
};
};
};
};
nginx = {
additionalModules = [pkgs.nginxModules.rtmp];
enable = true;
recommendedProxySettings = true;
recommendedGzipSettings = true;
@ -109,29 +120,6 @@
};
};
};
appendConfig = ''
rtmp {
server {
listen 1936;
chunk_size 4096;
application live {
live on;
allow publish 127.0.0.1;
allow publish 100.110.180.123;
allow publish fd7a:115c:a1e0::246e:b47b;
deny publish all;
allow play all;
record off;
hls on;
hls_path /tmp/hls;
dash on;
dash_path /tmp/dash;
}
}
}
'';
};
};

View file

@ -8,6 +8,8 @@ let
dnsmasq = [dnsmasq-cache dnsmasq-cache-floof];
minecraft-server = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINkLMVdCnjFsZ3tg7s3fE64VBw4QIekgMt2fAY1E79wv";
proxmox-k3s-node = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB1KEjdFl0UmuKfESJTMZdKR2H9a405z0SSlt75NKKht";
seattle = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIF9pTEqeVljLq0ctFgDn25Q76mCqpddkSNN9kd3IQXd1";
glasgow = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMgZSpfnx/4kfE4P1tFpq047IZkF2Q0UYahputnWxtEJ";
@ -33,4 +35,6 @@ in {
"secrets/paperless-oauth.age".publicKeys = [vancouver] ++ users;
"secrets/dnsmasq-nextdns-profile.age".publicKeys = dnsmasq ++ users;
"secrets/minecraft-server-cloudflared.age".publicKeys = [minecraft-server] ++ users;
}

Binary file not shown.

BIN secrets/paperless-oauth.age Normal file

Binary file not shown.