Compare commits


7 commits

Author SHA1 Message Date
Gabriel Simmer ec04275b34
dns updates for haproxy endpoint 2024-05-04 15:28:48 +01:00
Gabriel Simmer 254fbde3c2
haproxy scrape config 2024-05-04 15:28:21 +01:00
Gabriel Simmer 245a51ecfd
New cluster, proper namespaces, PostgreSQL migration 2024-05-04 15:27:33 +01:00
Gabriel Simmer 24931c2c7f
Increase /tmp to 4G 2024-05-04 15:11:24 +01:00
Gabriel Simmer 8a5af77597
Fix Loki object store setup 2024-05-04 15:11:07 +01:00
Gabriel Simmer f86bf5acea
Decommission seattle and glasgow
Moved to Talos
2024-05-04 15:10:49 +01:00
Gabriel Simmer b84b214541
Additional nitter bot configuration 2024-05-04 15:09:56 +01:00
44 changed files with 709 additions and 895 deletions

View file

@ -13,6 +13,10 @@ client = discord.Client(intents=intents, activity=discord.Game('Swearing at Twit
tree = app_commands.CommandTree(client)
nitter_internal = os.environ['NITTER_URL']
nitter_external = os.getenv('NITTER_EXTERNAL_URL', nitter_internal)
@client.event
async def on_ready():
await tree.sync()
@ -33,8 +37,10 @@ async def nitter(interaction: discord.Interaction, link: str):
await interaction.response.send_message('invalid twitter link', ephemeral=True)
return
nitter_url = f'https://nitter.gmem.ca{urlparsed.path}'
response = requests.get(nitter_url)
internal_nitter_url = f'{nitter_internal}{urlparsed.path}'
nitter_url = f'{nitter_external}{urlparsed.path}'
response = requests.get(internal_nitter_url)
# 4xx error codes
if 399 < response.status_code < 500:
await interaction.response.send_message('could not find tweet/user', ephemeral=True)
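For reference, a minimal standalone sketch of the internal/external URL split this hunk introduces: the bot checks the tweet against the in-cluster Nitter service but replies with the public hostname. The rewrite helper and the example values in the comments are hypothetical, not part of the bot itself.

import os
from urllib.parse import urlparse

import requests

nitter_internal = os.environ['NITTER_URL']                            # e.g. http://nitter:8080 (in-cluster service)
nitter_external = os.getenv('NITTER_EXTERNAL_URL', nitter_internal)   # e.g. https://nitter.gmem.ca (public)

def rewrite(link: str) -> str | None:
    path = urlparse(link).path
    # Probe the internal endpoint so the existence check never leaves the cluster network.
    response = requests.get(f'{nitter_internal}{path}')
    if 399 < response.status_code < 500:  # 4xx: tweet or user not found
        return None
    # Hand back the externally reachable URL for the Discord reply.
    return f'{nitter_external}{path}'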

View file

@ -27,26 +27,27 @@
};
"cluster" = {
a = {
data = ["100.77.43.133" "100.121.5.8" "100.106.229.20"];
data = ["100.77.43.133"];
};
aaaa = {
data = [
"fd7a:115c:a1e0:ab12:4843:cd96:624d:2b85"
"fd7a:115c:a1e0:ab12:4843:cd96:626a:e514"
"fd7a:115c:a1e0::ad79:508"
];
};
};
"newcluster" = {
a = {
ttl = 3600;
data = ["100.87.208.14"];
};
aaaa = {
ttl = 3600;
data = [ "fd7a:115c:a1e0::2001:d00e" ];
};
};
"homelab" = {
a = {
data = ["192.168.50.146" "192.168.50.134" "192.168.50.144"];
};
aaaa = {
data = [
"2a02:1648:6709:0:da3a:ddff:fe18:f4ca"
"2a02:1648:6709:0:a5ab:461a:52b:f6c5"
"2a02:1648:6709:0:dea6:32ff:fea0:b84e"
];
data = ["192.168.50.45"];
};
};
"_acme-challenge.router" = {
@ -57,36 +58,39 @@
}
// lib.attrsets.genAttrs [
# Internally hosted applications
"atuin"
"dref"
"freshrss"
"hb"
"home"
"hue"
"netboot"
"pipedapi"
"piped"
"request-media"
"tools"
"ytproxy"
"changedetect"
] (name: {cname.data = "cluster";})
// lib.attrsets.genAttrs [
# Internally hosted applications
"atuin"
"pipedapi"
"piped"
"tools"
"ytproxy"
"irc"
"hue"
"home"
"hb"
"rss"
"request-media"
"ntfy"
] (name: {cname.data = "newcluster";})
// lib.attrsets.genAttrs [
# Externally hosted applications with Tunnels
"git"
"authentik"
"games"
"photos"
"pw"
"nitter"
"git"
"ibiza"
"matrix"
"photos"
"proxmox"
"pw"
"tokyo"
"nitter"
] (name: {
cname = {
ttl = 0;
data = "b325b440-3d49-43e4-a028-be516e8f9bc3.cfargotunnel.com.";
data = "a1544154-d851-44ee-8d3a-9fa245867745.cfargotunnel.com.";
};
});
};

View file

@ -1,9 +1,7 @@
{lib, ...}: let
tailscale =
lib.lists.forEach [
"git"
"authentik"
"games"
"ibiza"
"matrix"
"photos"
@ -13,7 +11,7 @@
"nitter"
] (name: {
name = name + ".gmem.ca";
content = "cluster.gmem.ca";
content = "newcluster.gmem.ca";
});
home =
lib.lists.forEach [

View file

@ -239,11 +239,11 @@
]
},
"locked": {
"lastModified": 1713906585,
"narHash": "sha256-fv84DCOkBtjF6wMATt0rfovu7e95L8rdEkSfNbwKR3U=",
"lastModified": 1714515075,
"narHash": "sha256-azMK7aWH0eUc3IqU4Fg5rwZdB9WZBvimOGG3piqvtsY=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "bfa7c06436771e3a0c666ccc6ee01e815d4c33aa",
"rev": "6d3b6dc9222c12b951169becdf4b0592ee9576ef",
"type": "github"
},
"original": {
@ -279,11 +279,11 @@
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1713701427,
"narHash": "sha256-v6z8hz/UDaC/rbnkH+hxGFUxlNyseVntRetVpSxLU6c=",
"lastModified": 1714306226,
"narHash": "sha256-CA7bfnDt9TcFc7I8eKHf72DodYUEETDPgmBFXBRP9/E=",
"owner": "nix-community",
"repo": "lib-aggregate",
"rev": "3b32a98eb3053f8c8ca55497d1881443ef2996e6",
"rev": "49d9b510614b9bd137e067eb31445a8feca83313",
"type": "github"
},
"original": {
@ -414,11 +414,11 @@
},
"nixos-hardware": {
"locked": {
"lastModified": 1713864415,
"narHash": "sha256-/BPDMJEkrsFAFOsQWhwm31wezlgshPFlLBn34KEUdVA=",
"lastModified": 1714465198,
"narHash": "sha256-ySkEJvS0gPz2UhXm0H3P181T8fUxvDVcoUyGn0Kc5AI=",
"owner": "NixOS",
"repo": "nixos-hardware",
"rev": "797f8d8082c7cc3259cba7275c699d4991b09ecc",
"rev": "68d680c1b7c0e67a9b2144d6776583ee83664ef4",
"type": "github"
},
"original": {
@ -446,11 +446,11 @@
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1713660444,
"narHash": "sha256-2bVnrEGyWJhRNKspzfTJmVD/fsH9HQURD4cWpz79Ulw=",
"lastModified": 1714265296,
"narHash": "sha256-jVnKiCOoFulPT1zDdA4jfG/lnEnngdth5CT6rVDXEJ4=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "6882347415e352cfc9c277cc01f73e0f5cb7b93c",
"rev": "ade4fb7bbf04cd52bc1705734d5dc67755d77ec9",
"type": "github"
},
"original": {
@ -469,11 +469,11 @@
]
},
"locked": {
"lastModified": 1713978995,
"narHash": "sha256-eqAZRB3a7wf44Ek+g4c22CjkhFUcypmWCRyDkKZM328=",
"lastModified": 1714525911,
"narHash": "sha256-XYARtyCpKeL0IosMSzeHl6YFblV3n4y7plM+K9fg4N4=",
"owner": "nix-community",
"repo": "nixpkgs-wayland",
"rev": "e5f9ab26aa52ae7aee95b479652b54e2e5248da0",
"rev": "4cbf82124f2c03fa5b1b669771c48f9927264684",
"type": "github"
},
"original": {
@ -532,11 +532,11 @@
},
"nixpkgs_5": {
"locked": {
"lastModified": 1713805509,
"narHash": "sha256-YgSEan4CcrjivCNO5ZNzhg7/8ViLkZ4CB/GrGBVSudo=",
"lastModified": 1714314149,
"narHash": "sha256-yNAevSKF4krRWacmLUsLK7D7PlfuY3zF0lYnGYNi9vQ=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "1e1dc66fe68972a76679644a5577828b6a7e8be4",
"rev": "cf8cc1201be8bc71b7cbbbdaf349b22f4f99c7ae",
"type": "github"
},
"original": {

View file

@ -352,40 +352,6 @@
}
];
};
seattle = nixpkgs.lib.nixosSystem {
system = "aarch64-linux";
modules = [
nixos-hardware.nixosModules.raspberry-pi-4
agenix.nixosModules.default
(import ./nix/seattle/configuration.nix)
{
_module.args.nixinate = {
host = "seattle";
sshUser = "gsimmer";
buildOn = "remote";
substituteOnTarget = true;
hermetic = false;
};
}
];
};
glasgow = nixpkgs.lib.nixosSystem {
system = "aarch64-linux";
modules = [
agenix.nixosModules.default
nixos-hardware.nixosModules.raspberry-pi-4
(import ./nix/glasgow/configuration.nix)
{
_module.args.nixinate = {
host = "glasgow";
sshUser = "gsimmer";
buildOn = "remote";
substituteOnTarget = true;
hermetic = false;
};
}
];
};
proxmox-k3s-node-1 = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [

View file

@ -3,6 +3,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: atuin
namespace: atuin
spec:
replicas: 1
selector:
@ -18,12 +19,10 @@ spec:
- server
- start
env:
- name: RUST_LOG
value: debug,atuin_server=debug
- name: ATUIN_DB_URI
valueFrom:
secretKeyRef:
name: hippo-pguser-atuin
name: postgres-atuin
key: uri
optional: false
- name: ATUIN_HOST
@ -31,8 +30,8 @@ spec:
- name: ATUIN_PORT
value: "8888"
- name: ATUIN_OPEN_REGISTRATION
value: "true"
image: ghcr.io/atuinsh/atuin:v18.0.0
value: "false"
image: ghcr.io/atuinsh/atuin:v18.2.0
name: atuin
ports:
- containerPort: 8888
@ -62,6 +61,7 @@ apiVersion: v1
kind: Service
metadata:
name: atuin
namespace: atuin
spec:
selector:
app: atuin
@ -74,15 +74,14 @@ apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: atuin
namespace: atuin
annotations:
cert-manager.io/issuer: "le-issuer"
cert-manager.io/cluster-issuer: "le-issuer"
nginx.ingress.kubernetes.io/proxy-body-size: 1024m
namespace: default
spec:
tls:
- hosts:
- atuin.gmem.ca
secretName: gmem-ca-wildcard
rules:
- host: atuin.gmem.ca
http:

View file

@ -13,24 +13,21 @@ global:
name: authentik-secrets
key: secret-key
- name: AUTHENTIK_POSTGRESQL__HOST
valueFrom:
secretKeyRef:
name: hippo-pguser-authentik
key: host
value: 192.168.50.236
- name: AUTHENTIK_POSTGRESQL__PASSWORD
valueFrom:
secretKeyRef:
name: hippo-pguser-authentik
name: postgres-authentik
key: password
- name: AUTHENTIK_POSTGRESQL__USER
valueFrom:
secretKeyRef:
name: hippo-pguser-authentik
name: postgres-authentik
key: user
- name: AUTHENTIK_POSTGRESQL__PORT
valueFrom:
secretKeyRef:
name: hippo-pguser-authentik
name: postgres-authentik
key: port
server:
@ -44,6 +41,5 @@ server:
tls:
- hosts:
- authentik.gmem.ca
secretName: gmem-ca-wildcard
redis:
enabled: true

View file

@ -5,7 +5,7 @@
...
}: {
kubernetes.helm.releases.cloudflare-exporter = {
namespace = "default";
namespace = "cloudflare";
chart = kubenix.lib.helm.fetch {
repo = "https://lablabs.github.io/cloudflare-exporter";
chart = "cloudflare-exporter";

View file

@ -3,11 +3,13 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: cloudflared
namespace: cloudflare
spec:
selector:
matchLabels:
app: cloudflared
replicas: 3
replicas: 2
template:
metadata:
labels:
@ -15,7 +17,7 @@ spec:
spec:
containers:
- name: cloudflared
image: cloudflare/cloudflared:2024.2.1
image: cloudflare/cloudflared:2024.4.1
args:
- tunnel
- --config
@ -55,6 +57,8 @@ apiVersion: v1
kind: Service
metadata:
name: cloudflared-metrics
namespace: cloudflare
spec:
selector:
app: cloudflared
@ -67,6 +71,7 @@ apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: cloudflared
namespace: cloudflare
labels:
release: prometheus
spec:
@ -76,3 +81,35 @@ spec:
podMetricsEndpoints:
- port: metrics
interval: 30s
---
apiVersion: v1
kind: ConfigMap
metadata:
name: cloudflared
namespace: cloudflare
data:
config.yaml: |
tunnel: new-homelab
credentials-file: /etc/cloudflared/creds/credentials.json
metrics: 0.0.0.0:2000
no-autoupdate: true
ingress:
- hostname: photos.gmem.ca
service: http://immich-server.immich.svc.cluster.local:3001
- hostname: pw.gmem.ca
service: http://vaultwarden.vaultwarden.svc.cluster.local:80
- hostname: authentik.gmem.ca
service: http://authentik-server.authentik.svc.cluster.local:80
- hostname: nitter.gmem.ca
service: http://nitter.nitter.svc.cluster.local:8081
- hostname: git.gmem.ca
service: http://192.168.50.229
- hostname: proxmox.gmem.ca
service: http://proxmox.endpoints.svc.cluster.local:8006
- hostname: tokyo.gmem.ca
service: http://tokyo.endpoints.svc.cluster.local:8000
- hostname: ibiza.gmem.ca
service: http://ibiza.endpoints.svc.cluster.local:8000
- hostname: chat.gmem.ca
service: tcp://192.168.50.45:443
- service: http_status:404

View file

@ -1,7 +1,7 @@
let
endpoints = {
"proxmox" = {
location = "100.100.75.80";
location = "192.168.50.3";
host = "proxmox.gmem.ca";
port = 8006;
protocol = "HTTPS";
@ -28,6 +28,7 @@ let
in {
kubernetes.resources.services =
builtins.mapAttrs (name: endpoint: {
metadata.namespace = "endpoints";
spec = {
ports.${name} = {
port = endpoint.port;
@ -38,6 +39,7 @@ in {
endpoints;
kubernetes.resources.endpoints =
builtins.mapAttrs (name: endpoint: {
metadata.namespace = "endpoints";
subsets = [
{
addresses = [{ip = endpoint.location;}];
@ -56,9 +58,10 @@ in {
builtins.mapAttrs (name: endpoint: {
metadata = {
name = name;
namespace = "endpoints";
annotations = {
"nginx.ingress.kubernetes.io/proxy-body-size" = "10g";
"cert-manager.io/issuer" = "le-issuer";
"cert-manager.io/cluser-issuer" = "le-issuer";
"nginx.ingress.kubernetes.io/backend-protocol" = endpoint.protocol;
};
};
@ -66,7 +69,6 @@ in {
tls = [
{
hosts = [endpoint.host];
secretName = "gmem-ca-wildcard";
}
];
rules = [

View file

@ -1,103 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: freshrss
spec:
selector:
matchLabels:
app: freshrss
template:
metadata:
labels:
app: freshrss
spec:
containers:
- name: freshrss
image: freshrss/freshrss:1.22.1-arm
resources:
limits:
memory: "256Mi"
cpu: "500m"
ports:
- containerPort: 80
env:
- name: CRON_MIN
value: 1,31
envFrom:
- configMapRef:
name: freshrss-config
- secretRef:
name: freshrss-secrets
volumeMounts:
- name: data
mountPath: /var/www/FreshRSS/data
- name: extension-data
mountPath: /var/www/FreshRSS/data/extensions
volumes:
- name: data
persistentVolumeClaim:
claimName: freshrss-data
- name: extension-data
persistentVolumeClaim:
claimName: freshrss-extension-data
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: freshrss-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: nfs-client
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: freshrss-extension-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: nfs-client
---
apiVersion: v1
kind: Service
metadata:
name: freshrss
spec:
type: ClusterIP
selector:
app: freshrss
ports:
- port: 80
targetPort: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: freshrss
annotations:
cert-manager.io/issuer: "le-issuer"
namespace: default
spec:
tls:
- hosts:
- freshrss.gmem.ca
secretName: gmem-ca-wildcard
rules:
- host: freshrss.gmem.ca
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: freshrss
port:
number: 80

View file

@ -72,13 +72,11 @@ kind: Ingress
metadata:
name: homebridge
annotations:
cert-manager.io/issuer: "le-issuer"
namespace: default
cert-manager.io/cluster-issuer: "le-issuer"
spec:
tls:
- hosts:
- hb.gmem.ca
secretName: gmem-ca-wildcard
rules:
- host: hb.gmem.ca
http:

View file

@ -16,7 +16,7 @@
description = "Plex";
widget = {
type = "plex";
url = "http://vancouver:32400";
url = "http://192.168.50.229:32400";
key = "{{HOMEPAGE_VAR_PLEX_KEY}}";
};
};
@ -142,16 +142,10 @@
{
Reading = [
{
FreshRSS = {
icon = "freshrss.png";
href = "https://freshrss.gmem.ca";
description = "FreshRSS RSS Reader";
widget = {
type = "freshrss";
url = "https://freshrss.gmem.ca";
username = "arch";
password = "{{HOMEPAGE_VAR_FRESHRSS_PASSWORD}}";
};
miniflux = {
icon = "miniflux.png";
href = "https://rss.gmem.ca";
description = "Miniflux RSS Reader";
};
}
{
@ -309,6 +303,7 @@
};
in {
kubernetes.helm.releases.homepage = {
namespace = "homepage";
chart = kubenix.lib.helm.fetch {
repo = "https://jameswynn.github.io/helm-charts";
chart = "homepage";
@ -350,7 +345,7 @@ in {
};
kubernetes.resources.deployments.homepage = {
metadata.namespace = "default";
metadata.namespace = "homepage";
spec.template = {
metadata.annotations."gmem.ca/homepage-config-hash" = builtins.hashString "md5" (builtins.toJSON homepage-config);

View file

@ -2,7 +2,6 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: hue
namespace: default
spec:
selector:
matchLabels:
@ -14,7 +13,7 @@ spec:
spec:
containers:
- name: hue
image: icr.gmem.ca/hue
image: git.gmem.ca/arch/hue
resources:
limits:
memory: "32Mi"
@ -54,13 +53,11 @@ kind: Ingress
metadata:
name: hue
annotations:
cert-manager.io/issuer: "le-issuer"
namespace: default
cert-manager.io/cluser-issuer: "le-issuer"
spec:
tls:
- hosts:
- hue.gmem.ca
secretName: gmem-ca-wildcard
rules:
- host: hue.gmem.ca
http:

View file

@ -5,34 +5,37 @@
...
}: {
kubernetes.helm.releases.immich = {
namespace = "immich";
chart = kubenix.lib.helm.fetch {
repo = "https://immich-app.github.io/immich-charts";
chart = "immich";
version = "0.4.0";
sha256 = "qekwsAke6NBwhlbt7nIkuwTSIydcWOq/kETooYb64oY=";
version = "0.6.0";
sha256 = "p9fgqRMxRJ2rMBZZfMKuAIjp/N1/KgKCKLDhoXO0O6c=";
};
# arbitrary attrset passed as values to the helm release
values = {
image.tag = "v1.98.2";
image.tag = "v1.102.3";
machine-learning.enabled = false;
immich.persistence.library.existingClaim = "immich";
redis.enabled = true;
redis = {
enabled = true;
};
env = {
PGSSLMODE = "no-verify";
DB_URL.valueFrom.secretKeyRef = {
name = "hippo-pguser-immich";
key = "uri";
DB_PASSWORD.valueFrom.secretKeyRef = {
name = "postgres-immich";
key = "password";
};
DB_HOSTNAME.value = "192.168.50.236";
};
server.ingress.main = {
enabled = true;
annotations = {
"cert-manager.io/issuer" = "le-issuer";
"cert-manager.io/cluster-issuer" = "le-issuer";
};
tls = [
{
hosts = ["photos.gmem.ca"];
secretName = "gmem-ca-wildcard";
}
];
hosts = [
@ -46,7 +49,10 @@
};
kubernetes.resources.persistentVolumeClaims.immich = {
metadata.name = "immich";
metadata = {
name = "immich";
namespace = "immich";
};
spec = {
accessModes = ["ReadWriteOnce"];
resources.requests.storage = "50Gi";

View file

@ -4,16 +4,19 @@ let
gamjaImage = "git.gmem.ca/arch/gamja:latest";
in {
kubernetes.resources.services.soju = {
metadata.namespace = "irc";
spec = {
type = "NodePort";
selector.app = appName;
ports.tls = {
port = 6697;
targetPort = 6697;
nodePort = 6697;
};
};
};
kubernetes.resources.services.soju-ws = {
metadata.namespace = "irc";
spec = {
selector.app = appName;
ports.ws = {
@ -23,6 +26,7 @@ in {
};
};
kubernetes.resources.services.gamja = {
metadata.namespace = "irc";
spec = {
selector.app = "gamja";
ports.http = {
@ -31,55 +35,57 @@ in {
};
};
};
kubernetes.resources.deployments.soju.spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "soju";
ssl.secret.secretName = "gmem-ca-wildcard";
};
containers = {
soju = {
image = sojuImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/etc/soju/config";
subPath = "config";
}
{
name = "ssl";
mountPath = "/ssl";
}
];
ports.tls.containerPort = 6697;
ports.ws.containerPort = 80;
kubernetes.resources.deployments.soju = {
metadata.namespace = "irc";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "soju";
ssl.secret.secretName = "irc-gmem-ca";
};
containers = {
soju = {
image = sojuImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/etc/soju/config";
subPath = "config";
}
{
name = "ssl";
mountPath = "/ssl";
}
];
ports.tls.containerPort = 6697;
ports.ws.containerPort = 80;
env.PGHOST.valueFrom.secretKeyRef = {
name = "hippo-pguser-soju";
key = "host";
};
env.PGPASSWORD.valueFrom.secretKeyRef = {
name = "hippo-pguser-soju";
key = "password";
};
env.PGHOST.value = "192.168.50.236";
env.PGPASSWORD.valueFrom.secretKeyRef = {
name = "postgres-soju";
key = "password";
};
env.PGUSER.valueFrom.secretKeyRef = {
name = "hippo-pguser-soju";
name = "postgres-soju";
key = "user";
};
env.PGDATABASE.valueFrom.secretKeyRef = {
name = "hippo-pguser-soju";
name = "postgres-soju";
key = "dbname";
};
};
};
};
};
};
};
kubernetes.resources.deployments.gamja.spec = {
kubernetes.resources.deployments.gamja = {
metadata.namespace = "irc";
spec = {
selector.matchLabels.app = "gamja";
template = {
metadata.labels.app = "gamja";
@ -93,17 +99,20 @@ in {
};
};
};
};
};
kubernetes.resources.ingresses.irc = {
metadata.namespace = "irc";
metadata.annotations = {
"cert-manager.io/issuer" = "le-issuer";
"cert-manager.io/cluster-issuer" = "le-issuer";
"nginx.ingress.kubernetes.io/proxy-read-timeout" = "3600";
"nginx.ingress.kubernetes.io/proxy-send-timeout" = "3600";
};
spec = {
tls = [
{
hosts = ["irc.gmem.ca"];
secretName = "gmem-ca-wildcard";
}
];
rules = [
@ -132,7 +141,9 @@ in {
};
};
kubernetes.resources.configMaps.soju.data.config = ''
kubernetes.resources.configMaps.soju = {
metadata.namespace = "irc";
data.config = ''
listen ircs://
listen unix+admin:///app/admin
listen ws+insecure://
@ -142,4 +153,5 @@ in {
message-store db
tls /ssl/tls.crt /ssl/tls.key
'';
};
}

View file

@ -1,5 +1,5 @@
apiVersion: cert-manager.io/v1
kind: Issuer
kind: ClusterIssuer
metadata:
name: le-issuer
spec:
@ -29,7 +29,7 @@ metadata:
spec:
secretName: gmem-ca-wildcard
issuerRef:
kind: Issuer
kind: ClusterIssuer
name: le-issuer
commonName: "*.gmem.ca"
dnsNames:

View file

@ -15,15 +15,16 @@
(import ./immich.nix)
(import ./endpoints.nix)
(import ./homepage.nix)
# (import ./pterodactyl.nix)
(import ./cloudflare-exporter.nix)
(import ./piped.nix)
(import ./conduit.nix)
# (import ./conduit.nix)
(import ./irc.nix)
(import ./netboot.nix)
# (import ./netboot.nix)
(import ./nitter.nix)
(import ./changedetection.nix)
# (import ./changedetection.nix)
(import ./nextdns-exporter.nix)
(import ./nitter-bot.nix)
(import ./miniflux.nix)
# (import ./snikket.nix)
];
}

homelab/miniflux.nix (new file, 103 lines)
View file

@ -0,0 +1,103 @@
let
appName = "miniflux";
appImage = "docker.io/miniflux/miniflux";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.deployments.miniflux = {
metadata.namespace = "miniflux";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
miniflux = {
image = appImage;
envFrom = [{secretRef.name = "miniflux";}
{configMapRef.name = "miniflux";}];
resources = {
requests = {
cpu = "1m";
memory = "256Mi";
};
limits = {
cpu = "1";
memory = "512Mi";
};
};
ports.http.containerPort = 8080;
};
};
};
};
};
};
kubernetes.resources.services.miniflux = {
metadata.namespace = "miniflux";
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
};
};
};
kubernetes.resources.ingresses.miniflux = {
metadata.namespace = "miniflux";
metadata.annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
spec = {
tls = [
{
hosts = ["rss.gmem.ca"];
}
];
rules = [
{
host = "rss.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = "miniflux";
port.number = 8080;
};
}
];
}
];
};
};
kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.miniflux = {
metadata.namespace = "miniflux";
spec = {
selector.matchLabels.app = appName;
endpoints = [
{
port = "http";
interval = "60s";
}
];
};
};
kubernetes.resources.configMaps.miniflux = {
metadata.namespace = "miniflux";
data = {
CLEANUP_ARCHIVE_UNREAD_DAYS = "60";
METRICS_COLLECTOR = "1";
METRICS_ALLOWED_NETWORKS = "0.0.0.0/0";
BASE_URL = "https://rss.gmem.ca/";
};
};
}

View file

@ -3,6 +3,7 @@ let
nextdns-exporterImage = "ghcr.io/raylas/nextdns-exporter:0.5.3";
in {
kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.nextdns-exporter = {
metadata.namespace = "prometheus";
metadata.labels.app = appName;
spec = {
selector.matchLabels.app = appName;
@ -20,6 +21,7 @@ in {
};
kubernetes.resources.services.nextdns-exporter-metrics = {
metadata.namespace = "prometheus";
metadata.labels.app = appName;
spec = {
selector.app = appName;
@ -34,24 +36,27 @@ in {
};
};
kubernetes.resources.deployments.nextdns-exporter.spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
nextdns-exporter = {
image = nextdns-exporterImage;
imagePullPolicy = "Always";
ports.metrics.containerPort = 9948;
envFrom = [{secretRef.name = "nextdns-exporter";}];
};
nextdns-ts-exporter = {
image = nextdns-exporterImage;
imagePullPolicy = "Always";
ports.metrics.containerPort = 9949;
env.METRICS_PORT.value = "9949";
envFrom = [{secretRef.name = "nextdns-ts-exporter";}];
kubernetes.resources.deployments.nextdns-exporter = {
metadata.namespace = "prometheus";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
nextdns-exporter = {
image = nextdns-exporterImage;
imagePullPolicy = "Always";
ports.metrics.containerPort = 9948;
envFrom = [{secretRef.name = "nextdns-exporter";}];
};
nextdns-ts-exporter = {
image = nextdns-exporterImage;
imagePullPolicy = "Always";
ports.metrics.containerPort = 9949;
env.METRICS_PORT.value = "9949";
envFrom = [{secretRef.name = "nextdns-ts-exporter";}];
};
};
};
};

View file

@ -1,7 +1,8 @@
nfs:
server: vancouver
path: /Primary/k3scluster
server: 192.168.50.229
path: /tank/k3scluster
storageClass:
defaultClass: true
archiveOnDelete: false
onDelete: delete

View file

@ -9,24 +9,32 @@
chart = kubenix.lib.helm.fetch {
repo = "https://kubernetes.github.io/ingress-nginx";
chart = "ingress-nginx";
version = "4.9.1";
sha256 = "sha256-EJjNTC7nQUbGnS0xgF/eWyKs3vBpRPbbZmwl/pd9/44=";
version = "4.10.1";
sha256 = "BHRoXG5EtJdCGkzy52brAtEcMEZP+WkNtfBf+cwpNbs=";
};
values = {
controller = {
kind = "DaemonSet";
metrics = {
enabled = true;
serviceMonitor.enabled = true;
additionalLabels.release = "prometheus";
};
podAnnotations = {
"prometheus.io/scrape" = "true";
"prometheus.io/port" = "10254";
};
tolerations = [
{
key = "node-role.kubernetes.io/control-plane";
effect = "NoSchedule";
}
];
ingressClassResource.default = true;
publishService.enabled = true;
service.externalTrafficPolicy = "Local";
hostNetwork = true;
extraArgs.default-ssl-certificate = "cert-manager/gmem-ca-wildcard";
};
};
};

View file

@ -2,13 +2,15 @@ let
appName = "nitter-bot";
appImage = "git.gmem.ca/arch/nitter-bot:latest";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.statefulSets.nitter-bot.spec = {
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.statefulSets.nitter-bot = {
metadata.namespace = "nitter";
spec = {
selector.matchLabels.app = appName;
serviceName = appName;
template = {
@ -17,7 +19,8 @@ in
containers = {
nitter-bot = {
image = appImage;
envFrom = [{secretRef.name = "nitter-bot";}];
envFrom = [{secretRef.name = "nitter-bot";}
{configMapRef.name = "nitter-bot";}];
resources = {
requests = {
cpu = "1m";
@ -33,4 +36,13 @@ in
};
};
};
}
};
kubernetes.resources.configMaps.nitter-bot = {
metadata.namespace = "nitter";
data = {
NITTER_URL = "http://nitter:8080";
NITTER_EXTERNAL_URL = "https://nitter.gmem.ca";
};
};
}

View file

@ -9,6 +9,7 @@ in
...
}: {
kubernetes.resources.services.nitter = {
metadata.namespace = "nitter";
spec = {
selector.app = appName;
ports.http = {
@ -21,7 +22,9 @@ in
};
};
};
kubernetes.resources.deployments.nitter.spec = {
kubernetes.resources.deployments.nitter = {
metadata.namespace = "nitter";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
@ -68,8 +71,10 @@ in
};
};
};
};
};
kubernetes.helm.releases.nitter-redis = {
namespace = "nitter";
chart = kubenix.lib.helm.fetch {
repo = "https://charts.bitnami.com/bitnami";
chart = "redis";
@ -84,15 +89,15 @@ in
kubernetes.resources.ingresses.nitter = {
metadata = {
name = appName;
namespace = "nitter";
annotations = {
"cert-manager.io/issuer" = "le-issuer";
"cert-manager.io/cluster-issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["nitter.gmem.ca"];
secretName = "gmem-ca-wildcard";
}
];
rules = [

View file

@ -2,6 +2,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: ntfy
namespace: ntfy
spec:
selector:
matchLabels:
@ -35,6 +36,7 @@ apiVersion: v1
kind: Service
metadata:
name: ntfy
namespace: ntfy
spec:
selector:
app: ntfy
@ -46,6 +48,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: ntfy
namespace: ntfy
data:
server.yml: |
# Template: https://github.com/binwiederhier/ntfy/blob/main/server/server.yml
@ -58,13 +61,12 @@ kind: Ingress
metadata:
name: ntfy
annotations:
cert-manager.io/issuer: "le-issuer"
namespace: default
cert-manager.io/cluster-issuer: "le-issuer"
namespace: ntfy
spec:
tls:
- hosts:
- ntfy.gmem.ca
secretName: gmem-ca-wildcard
rules:
- host: ntfy.gmem.ca
http:

View file

@ -1,8 +1,9 @@
let
appName = "overseerr";
appImage = "sctx/overseerr";
appName = "jellyseerr";
appImage = "git.gmem.ca/arch/jellyseerr:postgres";
in {
kubernetes.resources.services.overseerr = {
kubernetes.resources.services.jellyseerr = {
metadata.namespace = "jellyseerr";
spec = {
selector.app = appName;
ports.http = {
@ -11,62 +12,57 @@ in {
};
};
};
kubernetes.resources.statefulSets.overseerr.spec = {
selector.matchLabels.app = appName;
serviceName = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "overseerr";
};
containers = {
overseerr = {
image = appImage;
volumeMounts = [
{
name = "data";
mountPath = "/app/config";
}
];
ports.metrics.containerPort = 5055;
resources = {
requests = {
cpu = "500m";
memory = "128Mi";
};
limits = {
cpu = "1";
memory = "512Mi";
kubernetes.resources.deployments.jellyseerr = {
metadata.namespace = "jellyseerr";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "jellyseerr";
};
containers = {
jellyseerr = {
image = appImage;
envFrom = [{secretRef.name = "jellyseerr";}
{configMapRef.name = "jellyseerr";}];
volumeMounts = [
{
name = "config";
mountPath = "/app/config/settings.json";
subPath = "settings.json";
}
];
ports.http.containerPort = 5055;
resources = {
requests = {
cpu = "500m";
memory = "128Mi";
};
limits = {
cpu = "1";
memory = "512Mi";
};
};
};
};
};
};
};
volumeClaimTemplates = [
{
metadata.name = "data";
spec = {
storageClassName = "nfs-client";
accessModes = ["ReadWriteOnce"];
resources.requests.storage = "1Gi";
};
}
];
};
kubernetes.resources.ingresses.overseerr = {
kubernetes.resources.ingresses.jellyseerr = {
metadata = {
name = appName;
namespace = "jellyseerr";
annotations = {
"cert-manager.io/issuer" = "le-issuer";
"cert-manager.io/cluster-issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["request-media.gmem.ca"];
secretName = "gmem-ca-wildcard";
}
];
rules = [

View file

@ -5,7 +5,7 @@
...
}: {
kubernetes.helm.releases.piped = {
namespace = "default";
namespace = "piped";
chart = kubenix.lib.helm.fetch {
repo = "https://helm.piped.video";
chart = "piped";
@ -25,13 +25,12 @@
password = "password";
};
};
fontend.env.BACKEND_HOSTNAME = "pipedapi.gmem.ca";
frontend.env.BACKEND_HOSTNAME = "pipedapi.gmem.ca";
ingress = {
main = {
tls = [
{
hosts = ["piped.gmem.ca"];
secretName = "gmem-ca-wildcard";
}
];
hosts = [
@ -45,7 +44,6 @@
tls = [
{
hosts = ["pipedapi.gmem.ca"];
secretName = "gmem-ca-wildcard";
}
];
hosts = [
@ -58,8 +56,7 @@
ytproxy = {
tls = [
{
hosts = ["ytproxy.gmem.ca"];
secretName = "gmem-ca-wildcard";
hosts = ["pipedproxy.gmem.ca"];
}
];
hosts = [
@ -73,27 +70,30 @@
};
};
kubernetes.resources.cronJobs.piped-refresh.spec = {
kubernetes.resources.cronJobs.piped-refresh = {
metadata.namespace = "piped";
spec = {
schedule = "*/10 * * * *";
jobTemplate.spec.template.spec = {
restartPolicy = "Never";
containers.refresh-subscriptions = {
image = "alpine:3.15";
envFrom = [{secretRef.name = "hippo-pguser-piped";}];
image = "debian:bookworm-slim";
envFrom = [{secretRef.name = "postgres-piped";}];
command = [
"/bin/ash"
"/bin/bash"
"-c"
''
apk --no-cache add postgresql-client curl &&
apt update && apt install -y postgresql-client curl
export PGPASSWORD=$password &&
export subs=$(psql -U piped -h hippo-primary.default.svc -qtAX -c 'select id from public.pubsub;') &&
export subs=$(psql -U piped -h 192.168.50.236 -qtAX -c 'select id from public.pubsub;') &&
while IFS= read -r line; do
echo "refreshing $line"
curl -k -S -s -o /dev/null "https://pipedapi.gmem.ca/channel/$line"
curl -k -o /dev/null "http://piped-backend:8080/channel/$line"
done < <(printf '%s' "$subs")
''
];
};
};
};
};
}

View file

@ -11,7 +11,7 @@ spec:
name: init-sql
instances:
- name: instance1
replicas: 3
replicas: 1
dataVolumeClaimSpec:
accessModes:
- "ReadWriteOnce"

View file

@ -14,6 +14,13 @@ prometheus:
password:
name: prometheus-remote-basic-auth
key: password
additionalScrapeConfigs:
- job_name: postgresql
scrape_interval: 15s
scrape_timeout: 10s
static_configs:
- targets:
- 192.168.50.236:9187
grafana:
enabled: false
alertmanager:

View file

@ -53,7 +53,7 @@ data:
grpc_listen_port: 0
clients:
- url: http://monitoring:3030/loki/api/v1/push
- url: http://100.126.232.130:3030/loki/api/v1/push
positions:
filename: /tmp/positions.yaml
@ -127,7 +127,7 @@ metadata:
subjects:
- kind: ServiceAccount
name: promtail-serviceaccount
namespace: default
namespace: promtail
roleRef:
kind: ClusterRole
name: promtail-clusterrole

homelab/snikket.nix (new file, 149 lines)
View file

@ -0,0 +1,149 @@
let
appName = "snikket";
snikketImage = "git.gmem.ca/arch/snikket-server:latest";
snikketPortalImage = "snikket/snikket-web-portal:stable";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.services.snikket = {
metadata.namespace = "snikket";
spec = {
selector.app = appName;
ports.http = {
port = 5280;
targetPort = 5280;
};
};
};
kubernetes.resources.services.snikket-xmpp = {
metadata.namespace = "snikket";
spec = {
type = "NodePort";
selector.app = appName;
ports.http = {
port = 5222;
targetPort = 5222;
nodePort = 5222;
};
};
};
kubernetes.resources.services.snikket-web-portal = {
metadata.namespace = "snikket";
spec = {
selector.app = appName + "-web-portal";
ports.http = {
port = 5765;
targetPort = 5765;
};
};
};
kubernetes.resources.deployments.snikket = {
metadata.namespace = "snikket";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
snikket = {
image = snikketImage;
env.SNIKKET_TWEAK_TURNSERVER.value = "0";
env.SNIKKET_TWEAK_INTERNAL_HTTP_INTERFACE.value = "0.0.0.0";
envFrom = [{configMapRef.name = "snikket";}];
imagePullPolicy = "Always";
volumeMounts = [
{
name = "certs";
mountPath = "/etc/prosody/certs/chat.gmem.ca.crt";
subPath = "tls.crt";
}
{
name = "certs";
mountPath = "/etc/prosody/certs/chat.gmem.ca.key";
subPath = "tls.key";
}
];
ports.http.containerPort = 5280;
};
};
volumes = {
certs.secret.secretName = "chat-gmem-ca";
};
};
};
};
};
kubernetes.resources.deployments.snikket-web-portal = {
metadata.namespace = "snikket";
spec = {
selector.matchLabels.app = appName + "-web-portal";
template = {
metadata.labels.app = appName + "-web-portal";
spec = {
containers = {
snikket = {
image = snikketPortalImage;
env.SNIKKET_TWEAK_PORTAL_INTERNAL_HTTP_INTERFACE.value = "0.0.0.0";
env.SNIKKET_WEB_PROSODY_ENDPOINT.value = "http://snikket:5280";
imagePullPolicy = "Always";
ports.http.containerPort = 5765;
};
};
};
};
};
};
kubernetes.resources.ingresses.snikket = {
metadata = {
name = appName;
namespace = "snikket";
annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["chat.gmem.ca"];
}
];
rules = [
{
host = "chat.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName + "-web-portal";
port.name = "http";
};
}
]
++ lib.lists.forEach [
# Routes we want to hit Prosody's backend
"/admin_api"
"/invites_api"
"/invites_bootstrap"
"/upload"
"/http-bind"
"/xmpp-websocket"
"/.well-known/host-meta"
"/.well-known/host-meta.json"
] (path: {
path = path;
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
});
}
];
};
};
}

View file

@ -1,108 +1,50 @@
let
appName = "tclip";
litestreamImage = "litestream/litestream:sha-749bc0d";
tclipImage = "git.gmem.ca/arch/tclip:arm";
in {
kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.tclip = {
kubernetes.resources.statefulSets.tclip = {
metadata.namespace = "tclip";
spec = {
serviceName = appName;
selector.matchLabels.app = appName;
endpoints = [
{
port = "metrics";
interval = "30s";
}
];
};
};
kubernetes.resources.services.tclip = {
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.metrics = {
port = 9090;
targetPort = 9090;
};
};
};
kubernetes.resources.statefulSets.tclip.spec = {
selector.matchLabels.app = appName;
serviceName = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
litestream.configMap.name = "tclip-litestream";
config.configMap.name = "tclip";
};
initContainers.init-litestream = {
image = litestreamImage;
args = ["restore" "-if-db-not-exists" "-if-replica-exists" "-v" "/data/data.db"];
volumeMounts = [
{
name = "data";
mountPath = "/data";
}
{
name = "litestream";
mountPath = "/etc/litestream.yml";
subPath = "tclip.yml";
}
];
envFrom = [{secretRef.name = "tclip-litestream-s3";}];
};
containers = {
tclip = {
image = tclipImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "data";
mountPath = "/data";
}
];
env = [
{
name = "DATA_DIR";
value = "/data";
}
{
name = "USE_FUNNEL";
value = "true";
}
];
};
litestream = {
image = litestreamImage;
args = ["replicate"];
volumeMounts = [
{
name = "data";
mountPath = "/data";
}
{
name = "litestream";
mountPath = "/etc/litestream.yml";
subPath = "tclip.yml";
}
];
envFrom = [{secretRef.name = "tclip-litestream-s3";}];
ports.metrics = {
containerPort = 9090;
name = "metrics";
template = {
metadata.labels.app = appName;
spec = {
containers = {
tclip = {
image = tclipImage;
imagePullPolicy = "Always";
env = [
{
name = "DATA_DIR";
value = "/state";
}
{
name = "USE_FUNNEL";
value = "true";
}
];
envFrom = [{secretRef.name = "tclip";}];
volumeMounts = [
{
name = "state";
mountPath = "/state";
}
];
};
};
};
};
volumeClaimTemplates = [
{
metadata.name = "state";
spec = {
storageClassName = "nfs-client";
accessModes = ["ReadWriteOnce"];
resources.requests.storage = "512Mi";
};
}
];
};
volumeClaimTemplates = [
{
metadata.name = "data";
spec = {
storageClassName = "nfs-client";
accessModes = ["ReadWriteOnce"];
resources.requests.storage = "1Gi";
};
}
];
};
}

View file

@ -2,6 +2,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: it-tools
namespace: it-tools
spec:
selector:
matchLabels:
@ -26,6 +27,7 @@ apiVersion: v1
kind: Service
metadata:
name: it-tools
namespace: it-tools
spec:
selector:
app: it-tools
@ -37,15 +39,14 @@ apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: it-tools
namespace: it-tools
annotations:
cert-manager.io/issuer: "le-issuer"
cert-manager.io/cluster-issuer: "le-issuer"
nginx.ingress.kubernetes.io/proxy-body-size: 100m
namespace: default
spec:
tls:
- hosts:
- tools.gmem.ca
secretName: gmem-ca-wildcard
rules:
- host: tools.gmem.ca
http:

View file

@ -1,90 +1,46 @@
apiVersion: apps/v1
kind: StatefulSet
kind: Deployment
metadata:
name: vaultwarden
spec:
replicas: 1
selector:
matchLabels:
app: vaultwarden
serviceName: vaultwarden
replicas: 1
template:
metadata:
labels:
app: vaultwarden
spec:
volumes:
- name: litestream
configMap:
name: vaultwarden-litestream
- name: config
configMap:
name: vaultwarden
initContainers:
- name: init-litestream
image: litestream/litestream:0.3.11
args: ['restore', '-if-db-not-exists', '-if-replica-exists', '-v', '/data/db.sqlite3']
volumeMounts:
- name: data
mountPath: /data
- name: litestream
mountPath: /etc/litestream.yml
subPath: vaultwarden.yml
- name: data
emptyDir: {}
containers:
- name: vaultwarden
image: docker.io/vaultwarden/server:testing
imagePullPolicy: Always
resources:
limits:
memory: "128Mi"
cpu: "500m"
requests:
memory: "64Mi"
cpu: "100m"
envFrom:
- secretRef:
name: vaultwarden-litestream-s3
containers:
- name: vaultwarden
image: docker.io/vaultwarden/server:testing
imagePullPolicy: Always
resources:
limits:
memory: "128Mi"
cpu: "500m"
requests:
memory: "64Mi"
cpu: "100m"
ports:
- containerPort: 80
name: web
volumeMounts:
- name: data
mountPath: /data
- name: config
mountPath: /data/config.json
subPath: vaultwarden.json
- name: litestream
image: litestream/litestream:0.3.11
args: ['replicate']
volumeMounts:
- name: data
mountPath: /data
- name: litestream
mountPath: /etc/litestream.yml
subPath: vaultwarden.yml
envFrom:
- secretRef:
name: vaultwarden-litestream-s3
ports:
- name: metrics
containerPort: 9090
resources:
limits:
memory: "128Mi"
cpu: "300m"
requests:
memory: "64Mi"
cpu: "100m"
volumeClaimTemplates:
- metadata:
name: data
spec:
storageClassName: nfs-client
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi
name: vaultwarden
ports:
- containerPort: 80
name: web
volumeMounts:
- name: data
mountPath: /data
- name: config
mountPath: /data/config.json
subPath: vaultwarden.json
---
apiVersion: v1
kind: Service
@ -99,31 +55,13 @@ spec:
- port: 80
targetPort: 80
name: web
- port: 9090
targetPort: 9090
name: metrics
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: vaultwarden
spec:
selector:
matchLabels:
app: vaultwarden
endpoints:
- port: metrics
interval: 30s
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: vaultwarden
annotations:
cert-manager.io/issuer: "le-issuer"
nginx.ingress.kubernetes.io/configuration-snippet: |
more_set_headers "X-Forwarded-For $http_x_forwarded_for";
namespace: default
cert-manager.io/cluser-issuer: "le-issuer"
spec:
tls:
- hosts:

View file

@ -3,6 +3,7 @@ let
appImage = "git.gmem.ca/arch/vrchat-prometheus-adapter:arm";
in {
kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.vrchat-prometheus-adapter = {
metadata.namespace = "vrchat";
spec = {
selector.matchLabels.app = appName;
endpoints = [
@ -14,6 +15,7 @@ in {
};
};
kubernetes.resources.services.vrchat-prometheus-adapter = {
metadata.namespace = "vrchat";
metadata.labels.app = appName;
spec = {
selector.app = appName;
@ -23,35 +25,38 @@ in {
};
};
};
kubernetes.resources.deployments.vrchat-prometheus-adapter.spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "vrchat-prometheus-adapter";
};
containers = {
vrchat-prometheus-adapter = {
image = appImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/config.toml";
subPath = "config.toml";
}
];
envFrom = [{secretRef.name = "vrchat-prometheus-adapter";}];
ports.metrics.containerPort = 6534;
resources = {
requests = {
cpu = "50m";
memory = "32Mi";
};
limits = {
cpu = "500m";
memory = "256Mi";
kubernetes.resources.deployments.vrchat-prometheus-adapter = {
metadata.namespace = "vrchat";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "vrchat-prometheus-adapter";
};
containers = {
vrchat-prometheus-adapter = {
image = appImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/config.toml";
subPath = "config.toml";
}
];
envFrom = [{secretRef.name = "vrchat-prometheus-adapter";}];
ports.metrics.containerPort = 6534;
resources = {
requests = {
cpu = "50m";
memory = "32Mi";
};
limits = {
cpu = "500m";
memory = "256Mi";
};
};
};
};

View file

@ -12,7 +12,7 @@
(modulesPath + "/profiles/qemu-guest.nix")
''${builtins.fetchTarball {
url = "https://github.com/nix-community/disko/archive/master.tar.gz";
sha256 = "0qyl65hs2j4f5ffj2lv5kb4hc1gradkqvv2j35hbdyiik155l4gn";
sha256 = "1dk4xi79lvm8hv1raf2snm3j8y4q23csm6d3siljg4cpf2y4wyl7";
}}/module.nix''
./disk-config.nix
];

View file

@ -12,7 +12,7 @@
(modulesPath + "/profiles/qemu-guest.nix")
''${builtins.fetchTarball {
url = "https://github.com/nix-community/disko/archive/master.tar.gz";
sha256 = "0qyl65hs2j4f5ffj2lv5kb4hc1gradkqvv2j35hbdyiik155l4gn";
sha256 = "1dk4xi79lvm8hv1raf2snm3j8y4q23csm6d3siljg4cpf2y4wyl7";
}}/module.nix''
./disk-config.nix
];

View file

@ -1,113 +0,0 @@
{
config,
pkgs,
...
}: {
imports = [
# Include the results of the hardware scan.
./hardware.nix
];
age.secrets.k3s-token = {
file = ../../secrets/k3s-token.age;
owner = "root";
};
boot = {
supportedFilesystems = ["nfs"];
kernelPackages = pkgs.linuxPackages;
kernelParams = ["cgroup_enable=memory" "cgroup_enable=cpuset" "cgroup_memory=1"];
loader = {
grub.enable = false;
generic-extlinux-compatible.enable = true;
};
};
swapDevices = [
{
device = "/var/lib/swapfile";
size = 8 * 1024;
}
];
nix = {
settings = {
auto-optimise-store = true;
experimental-features = ["nix-command" "flakes"];
};
gc = {
automatic = true;
dates = "weekly";
options = "--delete-older-than 30d";
};
# Free up to 1GiB whenever there is less than 100MiB left.
extraOptions = ''
min-free = ${toString (100 * 1024 * 1024)}
max-free = ${toString (1024 * 1024 * 1024)}
'';
};
networking = {
hostName = "glasgow";
domain = "gmem.ca";
firewall = {
trustedInterfaces = ["tailscale0"];
checkReversePath = "loose";
allowedUDPPorts = [41641];
allowedTCPPorts = [22 80 443 6443 10250];
enable = false;
};
nftables.enable = false;
};
time.timeZone = "Europe/London";
users.users.gsimmer = {
isNormalUser = true;
extraGroups = ["wheel"];
packages = with pkgs; [
tree
];
openssh.authorizedKeys.keys = let
authorizedKeys = pkgs.fetchurl {
url = "https://gmem.ca/ssh";
hash = "sha256-7PpFDgWVfp26c9PuW+2s3O8MBAODtHr4q7WU/l3BoG4=";
};
in
pkgs.lib.splitString "\n" (builtins.readFile
authorizedKeys);
};
environment.systemPackages = with pkgs; [
vim
wget
htop
git
screen
nix-output-monitor
tailscale
nfs-utils
libraspberrypi
];
services = {
rpcbind.enable = true;
openssh.enable = true;
tailscale.enable = true;
k3s = {
enable = true;
role = "agent";
serverAddr = "https://100.77.43.133:6443";
tokenFile = config.age.secrets.k3s-token.path;
};
};
hardware = {
bluetooth = {
enable = true;
powerOnBoot = true;
};
};
system.stateVersion = "23.11"; # dId YoU rEaD tHe CoMmEnT?
}

View file

@ -1,37 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}: {
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = ["xhci_pci" "uas"];
boot.initrd.kernelModules = [];
boot.kernelModules = [];
boot.extraModulePackages = [];
fileSystems."/" = {
device = "/dev/disk/by-uuid/44444444-4444-4444-8888-888888888888";
fsType = "ext4";
};
swapDevices = [];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.end0.useDHCP = lib.mkDefault true;
# networking.interfaces.wlan0.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
powerManagement.cpuFreqGovernor = lib.mkDefault "ondemand";
}

View file

@ -22,7 +22,7 @@
in [
(import (builtins.fetchTarball {
url = "https://github.com/nix-community/emacs-overlay/archive/master.tar.gz";
sha256 = "1dqmw3v3w8grqyc492hadxswvj0dfw4w2mbb4nmfcmnanr5i3ys3";
sha256 = "0yy91pryh8pbq2sz07nzjb11s5ghrn9773v0vsh475an4g4p9933";
}))
discordOverlay
];

View file

@ -30,7 +30,7 @@
fileSystems."/tmp" = {
device = "tmpfs";
fsType = "tmpfs";
options = [ "size=1G" "mode=777" ]; # mode=755 so only root can write to those files
options = [ "size=4G" "mode=777" ]; # mode=755 so only root can write to those files
};
swapDevices = [

View file

@ -118,6 +118,16 @@
period = "24h";
};
}
{
from = "2024-05-01";
store = "tsdb";
object_store = "filesystem";
schema = "v13";
index = {
prefix = "index_";
period = "24h";
};
}
];
};
@ -127,6 +137,11 @@
cache_location = "/var/lib/loki/boltdb-shipper-cache";
cache_ttl = "24h";
};
tsdb_shipper = {
active_index_directory = "/var/lib/loki/tsdb-shipper-active";
cache_location = "/var/lib/loki/tsdb-shipper-cache";
cache_ttl = "24h";
};
filesystem = {
directory = "/var/lib/loki/chunks";
@ -394,6 +409,11 @@
}
];
}
{
job_name = "haproxy";
scrape_interval = "10s";
static_configs = [{targets = ["100.87.208.14:8404"];}];
}
];
exporters.node = {
enable = true;

View file

@ -1,107 +0,0 @@
{
config,
pkgs,
...
}: {
imports = [
# Include the results of the hardware scan.
./hardware.nix
];
boot = {
supportedFilesystems = ["nfs"];
kernelPackages = pkgs.linuxPackages_rpi4;
kernelParams = ["cgroup_enable=memory" "cgroup_enable=cpuset" "cgroup_memory=1"];
loader = {
grub.enable = false;
generic-extlinux-compatible.enable = true;
};
};
nix = {
settings = {
auto-optimise-store = true;
experimental-features = ["nix-command" "flakes"];
};
gc = {
automatic = true;
dates = "weekly";
options = "--delete-older-than 30d";
};
# Free up to 1GiB whenever there is less than 100MiB left.
extraOptions = ''
min-free = ${toString (100 * 1024 * 1024)}
max-free = ${toString (1024 * 1024 * 1024)}
'';
};
networking = {
hostName = "seattle";
domain = "gmem.ca";
firewall = {
trustedInterfaces = ["tailscale0"];
checkReversePath = "loose";
allowedTCPPorts = [22 80 443 6443 10250];
allowedUDPPorts = [41641 80 443];
enable = false;
};
nftables.enable = false;
};
time.timeZone = "Europe/London";
users.users.gsimmer = {
isNormalUser = true;
extraGroups = ["wheel"];
packages = with pkgs; [
tree
];
openssh.authorizedKeys.keys = let
authorizedKeys = pkgs.fetchurl {
url = "https://gmem.ca/ssh";
hash = "sha256-7PpFDgWVfp26c9PuW+2s3O8MBAODtHr4q7WU/l3BoG4=";
};
in
pkgs.lib.splitString "\n" (builtins.readFile
authorizedKeys);
};
environment.systemPackages = with pkgs; [
vim
wget
htop
git
screen
nix-output-monitor
tailscale
nfs-utils
libraspberrypi
];
services = {
rpcbind.enable = true;
openssh.enable = true;
tailscale.enable = true;
k3s = {
enable = true;
role = "server";
extraFlags = toString [
"--secrets-encryption --disable=traefik,servicelb --kube-apiserver-arg service-node-port-range=69-32767"
];
};
};
hardware = {
bluetooth = {
enable = true;
powerOnBoot = true;
};
raspberry-pi."4".apply-overlays-dtmerge.enable = true;
deviceTree = {
enable = true;
filter = "*rpi-4-*.dtb";
};
};
system.stateVersion = "23.11"; # dId YoU rEaD tHe CoMmEnT?
}

View file

@ -1,37 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
config,
lib,
pkgs,
modulesPath,
...
}: {
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = ["xhci_pci" "uas"];
boot.initrd.kernelModules = [];
boot.kernelModules = [];
boot.extraModulePackages = [];
fileSystems."/" = {
device = "/dev/disk/by-uuid/44444444-4444-4444-8888-888888888888";
fsType = "ext4";
};
swapDevices = [];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's
# still possible to use this option, but it's recommended to use it in conjunction
# with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
networking.useDHCP = lib.mkDefault true;
# networking.interfaces.end0.useDHCP = lib.mkDefault true;
# networking.interfaces.wlan0.useDHCP = lib.mkDefault true;
nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
powerManagement.cpuFreqGovernor = lib.mkDefault "ondemand";
}