diff --git a/homelab/atuin.yaml b/homelab/atuin.yaml
index 5ddd495..dee313b 100644
--- a/homelab/atuin.yaml
+++ b/homelab/atuin.yaml
@@ -3,6 +3,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: atuin
+  namespace: atuin
 spec:
   replicas: 1
   selector:
@@ -18,12 +19,10 @@ spec:
             - server
             - start
           env:
-            - name: RUST_LOG
-              value: debug,atuin_server=debug
            - name: ATUIN_DB_URI
              valueFrom:
                secretKeyRef:
-                  name: hippo-pguser-atuin
+                  name: postgres-atuin
                  key: uri
                  optional: false
            - name: ATUIN_HOST
@@ -31,8 +30,8 @@ spec:
            - name: ATUIN_PORT
              value: "8888"
            - name: ATUIN_OPEN_REGISTRATION
-              value: "true"
-          image: ghcr.io/atuinsh/atuin:v18.0.0
+              value: "false"
+          image: ghcr.io/atuinsh/atuin:v18.2.0
          name: atuin
          ports:
            - containerPort: 8888
@@ -62,6 +61,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: atuin
+  namespace: atuin
 spec:
   selector:
     app: atuin
@@ -74,15 +74,14 @@ apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
   name: atuin
+  namespace: atuin
   annotations:
-    cert-manager.io/issuer: "le-issuer"
+    cert-manager.io/cluster-issuer: "le-issuer"
     nginx.ingress.kubernetes.io/proxy-body-size: 1024m
-  namespace: default
 spec:
   tls:
     - hosts:
         - atuin.gmem.ca
-      secretName: gmem-ca-wildcard
   rules:
     - host: atuin.gmem.ca
       http:
diff --git a/homelab/authentik.yml b/homelab/authentik.yml
index f2b846d..9493be2 100644
--- a/homelab/authentik.yml
+++ b/homelab/authentik.yml
@@ -13,24 +13,21 @@ global:
           name: authentik-secrets
           key: secret-key
     - name: AUTHENTIK_POSTGRESQL__HOST
-      valueFrom:
-        secretKeyRef:
-          name: hippo-pguser-authentik
-          key: host
+      value: 192.168.50.236
     - name: AUTHENTIK_POSTGRESQL__PASSWORD
       valueFrom:
         secretKeyRef:
-          name: hippo-pguser-authentik
+          name: postgres-authentik
           key: password
     - name: AUTHENTIK_POSTGRESQL__USER
       valueFrom:
         secretKeyRef:
-          name: hippo-pguser-authentik
+          name: postgres-authentik
           key: user
     - name: AUTHENTIK_POSTGRESQL__PORT
       valueFrom:
         secretKeyRef:
-          name: hippo-pguser-authentik
+          name: postgres-authentik
           key: port
 
 server:
@@ -44,6 +41,5 @@ server:
     tls:
       - hosts:
           - authentik.gmem.ca
-        secretName: gmem-ca-wildcard
 redis:
   enabled: true
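The `hippo-pguser-*` secrets above were generated by the old in-cluster Crunchy Postgres operator; the `postgres-*` secrets replacing them point at the external server on 192.168.50.236 and now have to exist by hand. A minimal sketch of the one the atuin Deployment expects — the key name comes from the secretKeyRef above, the connection string is a placeholder, and the same hand-rolled pattern applies to the authentik, immich, soju, and piped secrets:

    apiVersion: v1
    kind: Secret
    metadata:
      name: postgres-atuin
      namespace: atuin
    type: Opaque
    stringData:
      # `uri` is the only key the Deployment reads; the value is illustrative.
      uri: postgresql://atuin:<password>@192.168.50.236:5432/atuin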
diff --git a/homelab/cloudflare-exporter.nix b/homelab/cloudflare-exporter.nix
index b4f6544..9ade583 100644
--- a/homelab/cloudflare-exporter.nix
+++ b/homelab/cloudflare-exporter.nix
@@ -5,7 +5,7 @@
   ...
 }: {
   kubernetes.helm.releases.cloudflare-exporter = {
-    namespace = "default";
+    namespace = "cloudflare";
     chart = kubenix.lib.helm.fetch {
       repo = "https://lablabs.github.io/cloudflare-exporter";
       chart = "cloudflare-exporter";
diff --git a/homelab/cloudflared.yml b/homelab/cloudflared.yml
index 4889c51..cc0982f 100644
--- a/homelab/cloudflared.yml
+++ b/homelab/cloudflared.yml
@@ -3,11 +3,13 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: cloudflared
+  namespace: cloudflare
+
 spec:
   selector:
     matchLabels:
       app: cloudflared
-  replicas: 3
+  replicas: 2
   template:
     metadata:
       labels:
@@ -15,7 +17,7 @@ spec:
     spec:
       containers:
         - name: cloudflared
-          image: cloudflare/cloudflared:2024.2.1
+          image: cloudflare/cloudflared:2024.4.1
          args:
            - tunnel
            - --config
@@ -55,6 +57,8 @@ apiVersion: v1
 kind: Service
 metadata:
   name: cloudflared-metrics
+  namespace: cloudflare
+
 spec:
   selector:
     app: cloudflared
@@ -67,6 +71,7 @@ apiVersion: monitoring.coreos.com/v1
 kind: PodMonitor
 metadata:
   name: cloudflared
+  namespace: cloudflare
   labels:
     release: prometheus
 spec:
@@ -76,3 +81,35 @@ spec:
   podMetricsEndpoints:
     - port: metrics
       interval: 30s
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cloudflared
+  namespace: cloudflare
+data:
+  config.yaml: |
+    tunnel: new-homelab
+    credentials-file: /etc/cloudflared/creds/credentials.json
+    metrics: 0.0.0.0:2000
+    no-autoupdate: true
+    ingress:
+      - hostname: photos.gmem.ca
+        service: http://immich-server.immich.svc.cluster.local:3001
+      - hostname: pw.gmem.ca
+        service: http://vaultwarden.vaultwarden.svc.cluster.local:80
+      - hostname: authentik.gmem.ca
+        service: http://authentik-server.authentik.svc.cluster.local:80
+      - hostname: nitter.gmem.ca
+        service: http://nitter.nitter.svc.cluster.local:8081
+      - hostname: git.gmem.ca
+        service: http://192.168.50.229
+      - hostname: proxmox.gmem.ca
+        service: http://proxmox.endpoints.svc.cluster.local:8006
+      - hostname: tokyo.gmem.ca
+        service: http://tokyo.endpoints.svc.cluster.local:8000
+      - hostname: ibiza.gmem.ca
+        service: http://ibiza.endpoints.svc.cluster.local:8000
+      - hostname: chat.gmem.ca
+        service: tcp://192.168.50.45:443
+      - service: http_status:404
diff --git a/homelab/endpoints.nix b/homelab/endpoints.nix
index bd5d9f5..67e4632 100644
--- a/homelab/endpoints.nix
+++ b/homelab/endpoints.nix
@@ -1,7 +1,7 @@
 let
   endpoints = {
     "proxmox" = {
-      location = "100.100.75.80";
+      location = "192.168.50.3";
       host = "proxmox.gmem.ca";
       port = 8006;
       protocol = "HTTPS";
@@ -28,6 +28,7 @@ let
 in {
   kubernetes.resources.services = builtins.mapAttrs (name: endpoint: {
+      metadata.namespace = "endpoints";
      spec = {
        ports.${name} = {
          port = endpoint.port;
@@ -38,6 +39,7 @@
    endpoints;
   kubernetes.resources.endpoints = builtins.mapAttrs (name: endpoint: {
+      metadata.namespace = "endpoints";
      subsets = [
        {
          addresses = [{ip = endpoint.location;}];
@@ -56,9 +58,10 @@ in {
     builtins.mapAttrs (name: endpoint: {
       metadata = {
         name = name;
+        namespace = "endpoints";
         annotations = {
           "nginx.ingress.kubernetes.io/proxy-body-size" = "10g";
-          "cert-manager.io/issuer" = "le-issuer";
+          "cert-manager.io/cluster-issuer" = "le-issuer";
           "nginx.ingress.kubernetes.io/backend-protocol" = endpoint.protocol;
         };
       };
@@ -66,7 +69,6 @@ in {
         tls = [
           {
             hosts = [endpoint.host];
-            secretName = "gmem-ca-wildcard";
           }
         ];
         rules = [
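The new cloudflared ConfigMap points `credentials-file` at /etc/cloudflared/creds/credentials.json, so the Deployment's volumes (outside the hunks shown) must mount a secret carrying the tunnel credentials. A sketch, assuming the secret is named `tunnel-credentials`; the JSON fields are the ones `cloudflared tunnel create` writes out, with values elided:

    apiVersion: v1
    kind: Secret
    metadata:
      name: tunnel-credentials  # name is an assumption
      namespace: cloudflare
    type: Opaque
    stringData:
      credentials.json: |
        {"AccountTag": "…", "TunnelSecret": "…", "TunnelID": "…"}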
diff --git a/homelab/freshrss.yaml b/homelab/freshrss.yaml
deleted file mode 100644
index fd03857..0000000
--- a/homelab/freshrss.yaml
+++ /dev/null
@@ -1,103 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: freshrss
-spec:
-  selector:
-    matchLabels:
-      app: freshrss
-  template:
-    metadata:
-      labels:
-        app: freshrss
-    spec:
-      containers:
-        - name: freshrss
-          image: freshrss/freshrss:1.22.1-arm
-          resources:
-            limits:
-              memory: "256Mi"
-              cpu: "500m"
-          ports:
-            - containerPort: 80
-          env:
-            - name: CRON_MIN
-              value: 1,31
-          envFrom:
-            - configMapRef:
-                name: freshrss-config
-            - secretRef:
-                name: freshrss-secrets
-          volumeMounts:
-            - name: data
-              mountPath: /var/www/FreshRSS/data
-            - name: extension-data
-              mountPath: /var/www/FreshRSS/data/extensions
-      volumes:
-        - name: data
-          persistentVolumeClaim:
-            claimName: freshrss-data
-        - name: extension-data
-          persistentVolumeClaim:
-            claimName: freshrss-extension-data
----
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: freshrss-data
-spec:
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: nfs-client
----
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: freshrss-extension-data
-spec:
-  accessModes:
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: nfs-client
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: freshrss
-spec:
-  type: ClusterIP
-  selector:
-    app: freshrss
-  ports:
-    - port: 80
-      targetPort: 80
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: freshrss
-  annotations:
-    cert-manager.io/issuer: "le-issuer"
-  namespace: default
-spec:
-  tls:
-    - hosts:
-        - freshrss.gmem.ca
-      secretName: gmem-ca-wildcard
-  rules:
-    - host: freshrss.gmem.ca
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: freshrss
-                port:
-                  number: 80
diff --git a/homelab/homebridge.yaml b/homelab/homebridge.yaml
index 16db81f..4d55cfd 100644
--- a/homelab/homebridge.yaml
+++ b/homelab/homebridge.yaml
@@ -72,13 +72,11 @@ kind: Ingress
 metadata:
   name: homebridge
   annotations:
-    cert-manager.io/issuer: "le-issuer"
-  namespace: default
+    cert-manager.io/cluster-issuer: "le-issuer"
 spec:
   tls:
     - hosts:
         - hb.gmem.ca
-      secretName: gmem-ca-wildcard
   rules:
     - host: hb.gmem.ca
       http:
diff --git a/homelab/homepage.nix b/homelab/homepage.nix
index 87fd274..9a6273a 100644
--- a/homelab/homepage.nix
+++ b/homelab/homepage.nix
@@ -16,7 +16,7 @@
         description = "Plex";
         widget = {
           type = "plex";
-          url = "http://vancouver:32400";
+          url = "http://192.168.50.229:32400";
           key = "{{HOMEPAGE_VAR_PLEX_KEY}}";
         };
       };
@@ -142,16 +142,10 @@
     {
       Reading = [
         {
-          FreshRSS = {
-            icon = "freshrss.png";
-            href = "https://freshrss.gmem.ca";
-            description = "FreshRSS RSS Reader";
-            widget = {
-              type = "freshrss";
-              url = "https://freshrss.gmem.ca";
-              username = "arch";
-              password = "{{HOMEPAGE_VAR_FRESHRSS_PASSWORD}}";
-            };
+          miniflux = {
+            icon = "miniflux.png";
+            href = "https://rss.gmem.ca";
+            description = "Miniflux RSS Reader";
           };
         }
         {
@@ -309,6 +303,7 @@
   };
 in {
   kubernetes.helm.releases.homepage = {
+    namespace = "homepage";
    chart = kubenix.lib.helm.fetch {
      repo = "https://jameswynn.github.io/helm-charts";
      chart = "homepage";
@@ -350,7 +345,7 @@ in {
   };
 
   kubernetes.resources.deployments.homepage = {
-    metadata.namespace = "default";
+    metadata.namespace = "homepage";
     spec.template = {
       metadata.annotations."gmem.ca/homepage-config-hash" =
         builtins.hashString "md5" (builtins.toJSON homepage-config);
diff --git a/homelab/hue.yml b/homelab/hue.yml
index 578600f..5d1f559 100644
--- a/homelab/hue.yml
+++ b/homelab/hue.yml
@@ -2,7 +2,6 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: hue
-  namespace: default
 spec:
   selector:
     matchLabels:
@@ -14,7 +13,7 @@ spec:
     spec:
       containers:
         - name: hue
-          image: icr.gmem.ca/hue
+          image: git.gmem.ca/arch/hue
          resources:
            limits:
              memory: "32Mi"
@@ -54,13 +53,11 @@ kind: Ingress
 metadata:
   name: hue
   annotations:
-    cert-manager.io/issuer: "le-issuer"
-  namespace: default
+    cert-manager.io/cluster-issuer: "le-issuer"
 spec:
   tls:
     - hosts:
         - hue.gmem.ca
-      secretName: gmem-ca-wildcard
   rules:
     - host: hue.gmem.ca
       http:
diff --git a/homelab/immich.nix b/homelab/immich.nix
index b16f2ef..115d199 100644
--- a/homelab/immich.nix
+++ b/homelab/immich.nix
@@ -5,34 +5,37 @@
   ...
 }: {
   kubernetes.helm.releases.immich = {
+    namespace = "immich";
     chart = kubenix.lib.helm.fetch {
       repo = "https://immich-app.github.io/immich-charts";
       chart = "immich";
-      version = "0.4.0";
-      sha256 = "qekwsAke6NBwhlbt7nIkuwTSIydcWOq/kETooYb64oY=";
+      version = "0.6.0";
+      sha256 = "p9fgqRMxRJ2rMBZZfMKuAIjp/N1/KgKCKLDhoXO0O6c=";
     };
     # arbitrary attrset passed as values to the helm release
     values = {
-      image.tag = "v1.98.2";
+      image.tag = "v1.102.3";
       machine-learning.enabled = false;
       immich.persistence.library.existingClaim = "immich";
-      redis.enabled = true;
+      redis = {
+        enabled = true;
+      };
       env = {
         PGSSLMODE = "no-verify";
-        DB_URL.valueFrom.secretKeyRef = {
-          name = "hippo-pguser-immich";
-          key = "uri";
+        DB_PASSWORD.valueFrom.secretKeyRef = {
+          name = "postgres-immich";
+          key = "password";
         };
+        DB_HOSTNAME.value = "192.168.50.236";
       };
       server.ingress.main = {
         enabled = true;
         annotations = {
-          "cert-manager.io/issuer" = "le-issuer";
+          "cert-manager.io/cluster-issuer" = "le-issuer";
         };
         tls = [
           {
             hosts = ["photos.gmem.ca"];
-            secretName = "gmem-ca-wildcard";
           }
         ];
         hosts = [
@@ -46,7 +49,10 @@
   };
 
   kubernetes.resources.persistentVolumeClaims.immich = {
-    metadata.name = "immich";
+    metadata = {
+      name = "immich";
+      namespace = "immich";
+    };
     spec = {
       accessModes = ["ReadWriteOnce"];
       resources.requests.storage = "50Gi";
= "ssl"; + mountPath = "/ssl"; + } + ]; + ports.tls.containerPort = 6697; + ports.ws.containerPort = 80; - env.PGHOST.valueFrom.secretKeyRef = { - name = "hippo-pguser-soju"; - key = "host"; - }; - env.PGPASSWORD.valueFrom.secretKeyRef = { - name = "hippo-pguser-soju"; - key = "password"; - }; + env.PGHOST.value = "192.168.50.236"; + env.PGPASSWORD.valueFrom.secretKeyRef = { + name = "postgres-soju"; + key = "password"; + }; env.PGUSER.valueFrom.secretKeyRef = { - name = "hippo-pguser-soju"; + name = "postgres-soju"; key = "user"; }; env.PGDATABASE.valueFrom.secretKeyRef = { - name = "hippo-pguser-soju"; + name = "postgres-soju"; key = "dbname"; }; }; }; }; }; + }; }; - kubernetes.resources.deployments.gamja.spec = { + kubernetes.resources.deployments.gamja = { + metadata.namespace = "irc"; + spec = { selector.matchLabels.app = "gamja"; template = { metadata.labels.app = "gamja"; @@ -93,17 +99,20 @@ in { }; }; }; + }; }; kubernetes.resources.ingresses.irc = { + metadata.namespace = "irc"; metadata.annotations = { - "cert-manager.io/issuer" = "le-issuer"; + "cert-manager.io/cluster-issuer" = "le-issuer"; + "nginx.ingress.kubernetes.io/proxy-read-timeout" = "3600"; + "nginx.ingress.kubernetes.io/proxy-send-timeout" = "3600"; }; spec = { tls = [ { hosts = ["irc.gmem.ca"]; - secretName = "gmem-ca-wildcard"; } ]; rules = [ @@ -132,7 +141,9 @@ in { }; }; - kubernetes.resources.configMaps.soju.data.config = '' + kubernetes.resources.configMaps.soju = { + metadata.namespace = "irc"; + data.config = '' listen ircs:// listen unix+admin:///app/admin listen ws+insecure:// @@ -142,4 +153,5 @@ in { message-store db tls /ssl/tls.crt /ssl/tls.key ''; + }; } diff --git a/homelab/issuer.yml b/homelab/issuer.yml index ea3b3ad..49692e5 100644 --- a/homelab/issuer.yml +++ b/homelab/issuer.yml @@ -1,5 +1,5 @@ apiVersion: cert-manager.io/v1 -kind: Issuer +kind: ClusterIssuer metadata: name: le-issuer spec: @@ -29,7 +29,7 @@ metadata: spec: secretName: gmem-ca-wildcard issuerRef: - kind: Issuer + kind: ClusterIssuer name: le-issuer commonName: "*.gmem.ca" dnsNames: diff --git a/homelab/kubernetes.nix b/homelab/kubernetes.nix index 3dbf392..8267b2d 100644 --- a/homelab/kubernetes.nix +++ b/homelab/kubernetes.nix @@ -15,15 +15,16 @@ (import ./immich.nix) (import ./endpoints.nix) (import ./homepage.nix) - # (import ./pterodactyl.nix) (import ./cloudflare-exporter.nix) (import ./piped.nix) - (import ./conduit.nix) + # (import ./conduit.nix) (import ./irc.nix) - (import ./netboot.nix) + # (import ./netboot.nix) (import ./nitter.nix) - (import ./changedetection.nix) + # (import ./changedetection.nix) (import ./nextdns-exporter.nix) (import ./nitter-bot.nix) + (import ./miniflux.nix) + # (import ./snikket.nix) ]; } diff --git a/homelab/miniflux.nix b/homelab/miniflux.nix new file mode 100644 index 0000000..1c0a8b9 --- /dev/null +++ b/homelab/miniflux.nix @@ -0,0 +1,103 @@ +let + appName = "miniflux"; + appImage = "docker.io/miniflux/miniflux"; +in +{ + lib, + config, + kubenix, + ... 
+}: { + kubernetes.resources.deployments.miniflux = { + metadata.namespace = "miniflux"; + spec = { + selector.matchLabels.app = appName; + template = { + metadata.labels.app = appName; + spec = { + containers = { + miniflux = { + image = appImage; + envFrom = [{secretRef.name = "miniflux";} + {configMapRef.name = "miniflux";}]; + resources = { + requests = { + cpu = "1m"; + memory = "256Mi"; + }; + limits = { + cpu = "1"; + memory = "512Mi"; + }; + }; + ports.http.containerPort = 8080; + }; + }; + }; + }; + }; + }; + kubernetes.resources.services.miniflux = { + metadata.namespace = "miniflux"; + metadata.labels.app = appName; + spec = { + selector.app = appName; + ports.http = { + port = 8080; + targetPort = 8080; + }; + }; + }; + + kubernetes.resources.ingresses.miniflux = { + metadata.namespace = "miniflux"; + metadata.annotations = { + "cert-manager.io/cluster-issuer" = "le-issuer"; + }; + spec = { + tls = [ + { + hosts = ["rss.gmem.ca"]; + } + ]; + rules = [ + { + host = "rss.gmem.ca"; + http.paths = [ + { + path = "/"; + pathType = "Prefix"; + backend.service = { + name = "miniflux"; + port.number = 8080; + }; + } + ]; + } + ]; + }; + }; + + kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.miniflux = { + metadata.namespace = "miniflux"; + spec = { + selector.matchLabels.app = appName; + endpoints = [ + { + port = "http"; + interval = "60s"; + } + ]; + }; + }; + + kubernetes.resources.configMaps.miniflux = { + metadata.namespace = "miniflux"; + data = { + CLEANUP_ARCHIVE_UNREAD_DAYS = "60"; + METRICS_COLLECTOR = "1"; + METRICS_ALLOWED_NETWORKS = "0.0.0.0/0"; + BASE_URL = "https://rss.gmem.ca/"; + }; + }; +} diff --git a/homelab/nextdns-exporter.nix b/homelab/nextdns-exporter.nix index 062d8b2..4cb6648 100644 --- a/homelab/nextdns-exporter.nix +++ b/homelab/nextdns-exporter.nix @@ -3,6 +3,7 @@ let nextdns-exporterImage = "ghcr.io/raylas/nextdns-exporter:0.5.3"; in { kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.nextdns-exporter = { + metadata.namespace = "prometheus"; metadata.labels.app = appName; spec = { selector.matchLabels.app = appName; @@ -20,6 +21,7 @@ in { }; kubernetes.resources.services.nextdns-exporter-metrics = { + metadata.namespace = "prometheus"; metadata.labels.app = appName; spec = { selector.app = appName; @@ -34,24 +36,27 @@ in { }; }; - kubernetes.resources.deployments.nextdns-exporter.spec = { - selector.matchLabels.app = appName; - template = { - metadata.labels.app = appName; - spec = { - containers = { - nextdns-exporter = { - image = nextdns-exporterImage; - imagePullPolicy = "Always"; - ports.metrics.containerPort = 9948; - envFrom = [{secretRef.name = "nextdns-exporter";}]; - }; - nextdns-ts-exporter = { - image = nextdns-exporterImage; - imagePullPolicy = "Always"; - ports.metrics.containerPort = 9949; - env.METRICS_PORT.value = "9949"; - envFrom = [{secretRef.name = "nextdns-ts-exporter";}]; + kubernetes.resources.deployments.nextdns-exporter = { + metadata.namespace = "prometheus"; + spec = { + selector.matchLabels.app = appName; + template = { + metadata.labels.app = appName; + spec = { + containers = { + nextdns-exporter = { + image = nextdns-exporterImage; + imagePullPolicy = "Always"; + ports.metrics.containerPort = 9948; + envFrom = [{secretRef.name = "nextdns-exporter";}]; + }; + nextdns-ts-exporter = { + image = nextdns-exporterImage; + imagePullPolicy = "Always"; + ports.metrics.containerPort = 9949; + env.METRICS_PORT.value = "9949"; + envFrom = [{secretRef.name = "nextdns-ts-exporter";}]; + }; }; }; }; diff 
diff --git a/homelab/nextdns-exporter.nix b/homelab/nextdns-exporter.nix
index 062d8b2..4cb6648 100644
--- a/homelab/nextdns-exporter.nix
+++ b/homelab/nextdns-exporter.nix
@@ -3,6 +3,7 @@ let
   nextdns-exporterImage = "ghcr.io/raylas/nextdns-exporter:0.5.3";
 in {
   kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.nextdns-exporter = {
+    metadata.namespace = "prometheus";
     metadata.labels.app = appName;
     spec = {
       selector.matchLabels.app = appName;
@@ -20,6 +21,7 @@ in {
   };
 
   kubernetes.resources.services.nextdns-exporter-metrics = {
+    metadata.namespace = "prometheus";
     metadata.labels.app = appName;
     spec = {
       selector.app = appName;
@@ -34,24 +36,27 @@ in {
     };
   };
 
-  kubernetes.resources.deployments.nextdns-exporter.spec = {
-    selector.matchLabels.app = appName;
-    template = {
-      metadata.labels.app = appName;
-      spec = {
-        containers = {
-          nextdns-exporter = {
-            image = nextdns-exporterImage;
-            imagePullPolicy = "Always";
-            ports.metrics.containerPort = 9948;
-            envFrom = [{secretRef.name = "nextdns-exporter";}];
-          };
-          nextdns-ts-exporter = {
-            image = nextdns-exporterImage;
-            imagePullPolicy = "Always";
-            ports.metrics.containerPort = 9949;
-            env.METRICS_PORT.value = "9949";
-            envFrom = [{secretRef.name = "nextdns-ts-exporter";}];
+  kubernetes.resources.deployments.nextdns-exporter = {
+    metadata.namespace = "prometheus";
+    spec = {
+      selector.matchLabels.app = appName;
+      template = {
+        metadata.labels.app = appName;
+        spec = {
+          containers = {
+            nextdns-exporter = {
+              image = nextdns-exporterImage;
+              imagePullPolicy = "Always";
+              ports.metrics.containerPort = 9948;
+              envFrom = [{secretRef.name = "nextdns-exporter";}];
+            };
+            nextdns-ts-exporter = {
+              image = nextdns-exporterImage;
+              imagePullPolicy = "Always";
+              ports.metrics.containerPort = 9949;
+              env.METRICS_PORT.value = "9949";
+              envFrom = [{secretRef.name = "nextdns-ts-exporter";}];
+            };
           };
         };
       };
diff --git a/homelab/nfs-provisioner-values.yml b/homelab/nfs-provisioner-values.yml
index 2824533..927389f 100644
--- a/homelab/nfs-provisioner-values.yml
+++ b/homelab/nfs-provisioner-values.yml
@@ -1,7 +1,8 @@
 nfs:
-  server: vancouver
-  path: /Primary/k3scluster
+  server: 192.168.50.229
+  path: /tank/k3scluster
 storageClass:
   defaultClass: true
   archiveOnDelete: false
   onDelete: delete
+
diff --git a/homelab/nginx.nix b/homelab/nginx.nix
index 6e6545b..74caf26 100644
--- a/homelab/nginx.nix
+++ b/homelab/nginx.nix
@@ -9,24 +9,32 @@
     chart = kubenix.lib.helm.fetch {
       repo = "https://kubernetes.github.io/ingress-nginx";
       chart = "ingress-nginx";
-      version = "4.9.1";
-      sha256 = "sha256-EJjNTC7nQUbGnS0xgF/eWyKs3vBpRPbbZmwl/pd9/44=";
+      version = "4.10.1";
+      sha256 = "BHRoXG5EtJdCGkzy52brAtEcMEZP+WkNtfBf+cwpNbs=";
     };
     values = {
       controller = {
         kind = "DaemonSet";
         metrics = {
           enabled = true;
+          serviceMonitor.enabled = true;
           additionalLabels.release = "prometheus";
         };
         podAnnotations = {
           "prometheus.io/scrape" = "true";
           "prometheus.io/port" = "10254";
         };
+        tolerations = [
+          {
+            key = "node-role.kubernetes.io/control-plane";
+            effect = "NoSchedule";
+          }
+        ];
         ingressClassResource.default = true;
         publishService.enabled = true;
         service.externalTrafficPolicy = "Local";
         hostNetwork = true;
+        extraArgs.default-ssl-certificate = "cert-manager/gmem-ca-wildcard";
       };
     };
   };
diff --git a/homelab/nitter-bot.nix b/homelab/nitter-bot.nix
index 5d441da..0441e67 100644
--- a/homelab/nitter-bot.nix
+++ b/homelab/nitter-bot.nix
@@ -2,13 +2,15 @@ let
   appName = "nitter-bot";
   appImage = "git.gmem.ca/arch/nitter-bot:latest";
 in
-  {
-    lib,
-    config,
-    kubenix,
-    ...
-  }: {
-    kubernetes.resources.statefulSets.nitter-bot.spec = {
+{
+  lib,
+  config,
+  kubenix,
+  ...
+}: {
+  kubernetes.resources.statefulSets.nitter-bot = {
+    metadata.namespace = "nitter";
+    spec = {
      selector.matchLabels.app = appName;
      serviceName = appName;
      template = {
@@ -17,7 +19,8 @@ in
          containers = {
            nitter-bot = {
              image = appImage;
-              envFrom = [{secretRef.name = "nitter-bot";}];
+              envFrom = [{secretRef.name = "nitter-bot";}
+                {configMapRef.name = "nitter-bot";}];
              resources = {
                requests = {
                  cpu = "1m";
@@ -33,4 +36,13 @@ in
        };
      };
    };
-  }
+  };
+
+  kubernetes.resources.configMaps.nitter-bot = {
+    metadata.namespace = "nitter";
+    data = {
+      NITTER_URL = "http://nitter:8080";
+      NITTER_EXTERNAL_URL = "https://nitter.gmem.ca";
+    };
+  };
+}
diff --git a/homelab/nitter.nix b/homelab/nitter.nix
index 5f69091..a85fae5 100644
--- a/homelab/nitter.nix
+++ b/homelab/nitter.nix
@@ -9,6 +9,7 @@ in
   ...
 }: {
   kubernetes.resources.services.nitter = {
+    metadata.namespace = "nitter";
     spec = {
       selector.app = appName;
       ports.http = {
@@ -21,7 +22,9 @@ in
       };
     };
   };
-  kubernetes.resources.deployments.nitter.spec = {
+  kubernetes.resources.deployments.nitter = {
+    metadata.namespace = "nitter";
+    spec = {
     selector.matchLabels.app = appName;
     template = {
       metadata.labels.app = appName;
@@ -68,8 +71,10 @@ in
         };
       };
     };
+    };
   };
   kubernetes.helm.releases.nitter-redis = {
+    namespace = "nitter";
     chart = kubenix.lib.helm.fetch {
       repo = "https://charts.bitnami.com/bitnami";
       chart = "redis";
@@ -84,15 +89,15 @@ in
   kubernetes.resources.ingresses.nitter = {
     metadata = {
       name = appName;
+      namespace = "nitter";
       annotations = {
-        "cert-manager.io/issuer" = "le-issuer";
+        "cert-manager.io/cluster-issuer" = "le-issuer";
       };
     };
     spec = {
       tls = [
         {
           hosts = ["nitter.gmem.ca"];
-          secretName = "gmem-ca-wildcard";
         }
       ];
       rules = [
diff --git a/homelab/ntfy.yaml b/homelab/ntfy.yaml
index 3979f54..50f09d9 100644
--- a/homelab/ntfy.yaml
+++ b/homelab/ntfy.yaml
@@ -2,6 +2,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: ntfy
+  namespace: ntfy
 spec:
   selector:
     matchLabels:
@@ -35,6 +36,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: ntfy
+  namespace: ntfy
 spec:
   selector:
     app: ntfy
@@ -46,6 +48,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: ntfy
+  namespace: ntfy
 data:
   server.yml: |
     # Template: https://github.com/binwiederhier/ntfy/blob/main/server/server.yml
@@ -58,13 +61,12 @@ kind: Ingress
 metadata:
   name: ntfy
   annotations:
-    cert-manager.io/issuer: "le-issuer"
-  namespace: default
+    cert-manager.io/cluster-issuer: "le-issuer"
+  namespace: ntfy
 spec:
   tls:
     - hosts:
         - ntfy.gmem.ca
-      secretName: gmem-ca-wildcard
   rules:
     - host: ntfy.gmem.ca
       http:
storageClassName = "nfs-client"; - accessModes = ["ReadWriteOnce"]; - resources.requests.storage = "1Gi"; - }; - } - ]; }; - kubernetes.resources.ingresses.overseerr = { + kubernetes.resources.ingresses.jellyseerr = { metadata = { name = appName; + namespace = "jellyseerr"; annotations = { - "cert-manager.io/issuer" = "le-issuer"; + "cert-manager.io/cluster-issuer" = "le-issuer"; }; }; spec = { tls = [ { hosts = ["request-media.gmem.ca"]; - secretName = "gmem-ca-wildcard"; } ]; rules = [ diff --git a/homelab/piped.nix b/homelab/piped.nix index 1101fc2..4132138 100644 --- a/homelab/piped.nix +++ b/homelab/piped.nix @@ -5,7 +5,7 @@ ... }: { kubernetes.helm.releases.piped = { - namespace = "default"; + namespace = "piped"; chart = kubenix.lib.helm.fetch { repo = "https://helm.piped.video"; chart = "piped"; @@ -25,13 +25,12 @@ password = "password"; }; }; - fontend.env.BACKEND_HOSTNAME = "pipedapi.gmem.ca"; + frontend.env.BACKEND_HOSTNAME = "pipedapi.gmem.ca"; ingress = { main = { tls = [ { hosts = ["piped.gmem.ca"]; - secretName = "gmem-ca-wildcard"; } ]; hosts = [ @@ -45,7 +44,6 @@ tls = [ { hosts = ["pipedapi.gmem.ca"]; - secretName = "gmem-ca-wildcard"; } ]; hosts = [ @@ -58,8 +56,7 @@ ytproxy = { tls = [ { - hosts = ["ytproxy.gmem.ca"]; - secretName = "gmem-ca-wildcard"; + hosts = ["pipedproxy.gmem.ca"]; } ]; hosts = [ @@ -73,27 +70,30 @@ }; }; - kubernetes.resources.cronJobs.piped-refresh.spec = { + kubernetes.resources.cronJobs.piped-refresh = { + metadata.namespace = "piped"; + spec = { schedule = "*/10 * * * *"; jobTemplate.spec.template.spec = { restartPolicy = "Never"; containers.refresh-subscriptions = { - image = "alpine:3.15"; - envFrom = [{secretRef.name = "hippo-pguser-piped";}]; + image = "debian:bookworm-slim"; + envFrom = [{secretRef.name = "postgres-piped";}]; command = [ - "/bin/ash" + "/bin/bash" "-c" '' - apk --no-cache add postgresql-client curl && + apt update && apt install -y postgresql-client curl export PGPASSWORD=$password && - export subs=$(psql -U piped -h hippo-primary.default.svc -qtAX -c 'select id from public.pubsub;') && + export subs=$(psql -U piped -h 192.168.50.236 -qtAX -c 'select id from public.pubsub;') && while IFS= read -r line; do echo "refreshing $line" - curl -k -S -s -o /dev/null "https://pipedapi.gmem.ca/channel/$line" + curl -k -o /dev/null "http://piped-backend:8080/channel/$line" done < <(printf '%s' "$subs") '' ]; }; }; + }; }; } diff --git a/homelab/postgres-cluster.yml b/homelab/postgres-cluster.yml index 60fd3b4..19c1151 100644 --- a/homelab/postgres-cluster.yml +++ b/homelab/postgres-cluster.yml @@ -11,7 +11,7 @@ spec: name: init-sql instances: - name: instance1 - replicas: 3 + replicas: 1 dataVolumeClaimSpec: accessModes: - "ReadWriteOnce" diff --git a/homelab/prometheus-agent.yml b/homelab/prometheus-agent.yml index 1f84085..1b56499 100644 --- a/homelab/prometheus-agent.yml +++ b/homelab/prometheus-agent.yml @@ -14,6 +14,13 @@ prometheus: password: name: prometheus-remote-basic-auth key: password + additionalScrapeConfigs: + - job_name: postgresql + scrape_interval: 15s + scrape_timeout: 10s + static_configs: + - targets: + - 192.168.50.236:9187 grafana: enabled: false alertmanager: diff --git a/homelab/promtail.yml b/homelab/promtail.yml index bed11fb..de86fb7 100644 --- a/homelab/promtail.yml +++ b/homelab/promtail.yml @@ -53,7 +53,7 @@ data: grpc_listen_port: 0 clients: - - url: http://monitoring:3030/loki/api/v1/push + - url: http://100.126.232.130:3030/loki/api/v1/push positions: filename: /tmp/positions.yaml @@ -127,7 +127,7 @@ 
diff --git a/homelab/promtail.yml b/homelab/promtail.yml
index bed11fb..de86fb7 100644
--- a/homelab/promtail.yml
+++ b/homelab/promtail.yml
@@ -53,7 +53,7 @@ data:
       grpc_listen_port: 0
 
     clients:
-      - url: http://monitoring:3030/loki/api/v1/push
+      - url: http://100.126.232.130:3030/loki/api/v1/push
 
     positions:
       filename: /tmp/positions.yaml
@@ -127,7 +127,7 @@ metadata:
 subjects:
   - kind: ServiceAccount
     name: promtail-serviceaccount
-    namespace: default
+    namespace: promtail
 roleRef:
   kind: ClusterRole
   name: promtail-clusterrole
diff --git a/homelab/snikket.nix b/homelab/snikket.nix
new file mode 100644
index 0000000..903a974
--- /dev/null
+++ b/homelab/snikket.nix
@@ -0,0 +1,149 @@
+let
+  appName = "snikket";
+  snikketImage = "git.gmem.ca/arch/snikket-server:latest";
+  snikketPortalImage = "snikket/snikket-web-portal:stable";
+in
+{
+  lib,
+  config,
+  kubenix,
+  ...
+}: {
+  kubernetes.resources.services.snikket = {
+    metadata.namespace = "snikket";
+    spec = {
+      selector.app = appName;
+      ports.http = {
+        port = 5280;
+        targetPort = 5280;
+      };
+    };
+  };
+  kubernetes.resources.services.snikket-xmpp = {
+    metadata.namespace = "snikket";
+    spec = {
+      type = "NodePort";
+      selector.app = appName;
+      ports.http = {
+        port = 5222;
+        targetPort = 5222;
+        nodePort = 5222;
+      };
+    };
+  };
+  kubernetes.resources.services.snikket-web-portal = {
+    metadata.namespace = "snikket";
+    spec = {
+      selector.app = appName + "-web-portal";
+      ports.http = {
+        port = 5765;
+        targetPort = 5765;
+      };
+    };
+  };
+  kubernetes.resources.deployments.snikket = {
+    metadata.namespace = "snikket";
+    spec = {
+      selector.matchLabels.app = appName;
+      template = {
+        metadata.labels.app = appName;
+        spec = {
+          containers = {
+            snikket = {
+              image = snikketImage;
+              env.SNIKKET_TWEAK_TURNSERVER.value = "0";
+              env.SNIKKET_TWEAK_INTERNAL_HTTP_INTERFACE.value = "0.0.0.0";
+              envFrom = [{configMapRef.name = "snikket";}];
+              imagePullPolicy = "Always";
+              volumeMounts = [
+                {
+                  name = "certs";
+                  mountPath = "/etc/prosody/certs/chat.gmem.ca.crt";
+                  subPath = "tls.crt";
+                }
+                {
+                  name = "certs";
+                  mountPath = "/etc/prosody/certs/chat.gmem.ca.key";
+                  subPath = "tls.key";
+                }
+              ];
+              ports.http.containerPort = 5280;
+            };
+          };
+          volumes = {
+            certs.secret.secretName = "chat-gmem-ca";
+          };
+        };
+      };
+    };
+  };
+  kubernetes.resources.deployments.snikket-web-portal = {
+    metadata.namespace = "snikket";
+    spec = {
+      selector.matchLabels.app = appName + "-web-portal";
+      template = {
+        metadata.labels.app = appName + "-web-portal";
+        spec = {
+          containers = {
+            snikket = {
+              image = snikketPortalImage;
+              env.SNIKKET_TWEAK_PORTAL_INTERNAL_HTTP_INTERFACE.value = "0.0.0.0";
+              env.SNIKKET_WEB_PROSODY_ENDPOINT.value = "http://snikket:5280";
+              imagePullPolicy = "Always";
+              ports.http.containerPort = 5765;
+            };
+          };
+        };
+      };
+    };
+  };
+  kubernetes.resources.ingresses.snikket = {
+    metadata = {
+      name = appName;
+      namespace = "snikket";
+      annotations = {
+        "cert-manager.io/cluster-issuer" = "le-issuer";
+      };
+    };
+    spec = {
+      tls = [
+        {
+          hosts = ["chat.gmem.ca"];
+        }
+      ];
+      rules = [
+        {
+          host = "chat.gmem.ca";
+          http.paths = [
+            {
+              path = "/";
+              pathType = "Prefix";
+              backend.service = {
+                name = appName + "-web-portal";
+                port.name = "http";
+              };
+            }
+          ]
+          ++ lib.lists.forEach [
+            # Routes we want to hit Prosody's backend
+            "/admin_api"
+            "/invites_api"
+            "/invites_bootstrap"
+            "/upload"
+            "/http-bind"
+            "/xmpp-websocket"
+            "/.well-known/host-meta"
+            "/.well-known/host-meta.json"
+          ] (path: {
+            path = path;
+            pathType = "Prefix";
+            backend.service = {
+              name = appName;
+              port.name = "http";
+            };
+          });
+        }
+      ];
+    };
+  };
+}
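Both soju (nodePort: 6697) and snikket-xmpp (nodePort: 5222) pin NodePorts well below Kubernetes' default 30000–32767 service-node-port-range, so the apiserver needs a widened range for these services to admit. Assuming the cluster runs k3s (the /tank/k3scluster NFS path suggests it), a config sketch:

    # /etc/rancher/k3s/config.yaml — assumption: the cluster is k3s
    kube-apiserver-arg:
      - service-node-port-range=1-32767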
kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.tclip = { + kubernetes.resources.statefulSets.tclip = { + metadata.namespace = "tclip"; spec = { + serviceName = appName; selector.matchLabels.app = appName; - endpoints = [ - { - port = "metrics"; - interval = "30s"; - } - ]; - }; - }; - kubernetes.resources.services.tclip = { - metadata.labels.app = appName; - spec = { - selector.app = appName; - ports.metrics = { - port = 9090; - targetPort = 9090; - }; - }; - }; - kubernetes.resources.statefulSets.tclip.spec = { - selector.matchLabels.app = appName; - serviceName = appName; - template = { - metadata.labels.app = appName; - spec = { - volumes = { - litestream.configMap.name = "tclip-litestream"; - config.configMap.name = "tclip"; - }; - initContainers.init-litestream = { - image = litestreamImage; - args = ["restore" "-if-db-not-exists" "-if-replica-exists" "-v" "/data/data.db"]; - volumeMounts = [ - { - name = "data"; - mountPath = "/data"; - } - { - name = "litestream"; - mountPath = "/etc/litestream.yml"; - subPath = "tclip.yml"; - } - ]; - envFrom = [{secretRef.name = "tclip-litestream-s3";}]; - }; - containers = { - tclip = { - image = tclipImage; - imagePullPolicy = "Always"; - volumeMounts = [ - { - name = "data"; - mountPath = "/data"; - } - ]; - env = [ - { - name = "DATA_DIR"; - value = "/data"; - } - { - name = "USE_FUNNEL"; - value = "true"; - } - ]; - }; - litestream = { - image = litestreamImage; - args = ["replicate"]; - volumeMounts = [ - { - name = "data"; - mountPath = "/data"; - } - { - name = "litestream"; - mountPath = "/etc/litestream.yml"; - subPath = "tclip.yml"; - } - ]; - envFrom = [{secretRef.name = "tclip-litestream-s3";}]; - ports.metrics = { - containerPort = 9090; - name = "metrics"; + template = { + metadata.labels.app = appName; + spec = { + containers = { + tclip = { + image = tclipImage; + imagePullPolicy = "Always"; + env = [ + { + name = "DATA_DIR"; + value = "/state"; + } + { + name = "USE_FUNNEL"; + value = "true"; + } + ]; + envFrom = [{secretRef.name = "tclip";}]; + volumeMounts = [ + { + name = "state"; + mountPath = "/state"; + } + ]; }; }; }; }; + volumeClaimTemplates = [ + { + metadata.name = "state"; + spec = { + storageClassName = "nfs-client"; + accessModes = ["ReadWriteOnce"]; + resources.requests.storage = "512Mi"; + }; + } + ]; }; - volumeClaimTemplates = [ - { - metadata.name = "data"; - spec = { - storageClassName = "nfs-client"; - accessModes = ["ReadWriteOnce"]; - resources.requests.storage = "1Gi"; - }; - } - ]; }; } diff --git a/homelab/tools.yml b/homelab/tools.yml index f9ba328..0a9a516 100644 --- a/homelab/tools.yml +++ b/homelab/tools.yml @@ -2,6 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: it-tools + namespace: it-tools spec: selector: matchLabels: @@ -26,6 +27,7 @@ apiVersion: v1 kind: Service metadata: name: it-tools + namespace: it-tools spec: selector: app: it-tools @@ -37,15 +39,14 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: it-tools + namespace: it-tools annotations: - cert-manager.io/issuer: "le-issuer" + cert-manager.io/cluster-issuer: "le-issuer" nginx.ingress.kubernetes.io/proxy-body-size: 100m - namespace: default spec: tls: - hosts: - tools.gmem.ca - secretName: gmem-ca-wildcard rules: - host: tools.gmem.ca http: diff --git a/homelab/vaultwarden.yml b/homelab/vaultwarden.yml index 7471695..6240cea 100644 --- a/homelab/vaultwarden.yml +++ b/homelab/vaultwarden.yml @@ -1,90 +1,46 @@ apiVersion: apps/v1 -kind: StatefulSet +kind: Deployment metadata: name: 
diff --git a/homelab/vaultwarden.yml b/homelab/vaultwarden.yml
index 7471695..6240cea 100644
--- a/homelab/vaultwarden.yml
+++ b/homelab/vaultwarden.yml
@@ -1,90 +1,46 @@
 apiVersion: apps/v1
-kind: StatefulSet
+kind: Deployment
 metadata:
   name: vaultwarden
 spec:
+  replicas: 1
   selector:
     matchLabels:
       app: vaultwarden
-  serviceName: vaultwarden
-  replicas: 1
   template:
     metadata:
       labels:
         app: vaultwarden
     spec:
       volumes:
-        - name: litestream
-          configMap:
-            name: vaultwarden-litestream
         - name: config
           configMap:
             name: vaultwarden
-      initContainers:
-        - name: init-litestream
-          image: litestream/litestream:0.3.11
-          args: ['restore', '-if-db-not-exists', '-if-replica-exists', '-v', '/data/db.sqlite3']
-          volumeMounts:
-            - name: data
-              mountPath: /data
-            - name: litestream
-              mountPath: /etc/litestream.yml
-              subPath: vaultwarden.yml
+        - name: data
+          emptyDir: {}
+      containers:
+        - name: vaultwarden
+          image: docker.io/vaultwarden/server:testing
+          imagePullPolicy: Always
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+            requests:
+              memory: "64Mi"
+              cpu: "100m"
           envFrom:
             - secretRef:
-                name: vaultwarden-litestream-s3
-      containers:
-        - name: vaultwarden
-          image: docker.io/vaultwarden/server:testing
-          imagePullPolicy: Always
-          resources:
-            limits:
-              memory: "128Mi"
-              cpu: "500m"
-            requests:
-              memory: "64Mi"
-              cpu: "100m"
-          ports:
-            - containerPort: 80
-              name: web
-          volumeMounts:
-            - name: data
-              mountPath: /data
-            - name: config
-              mountPath: /data/config.json
-              subPath: vaultwarden.json
-        - name: litestream
-          image: litestream/litestream:0.3.11
-          args: ['replicate']
-          volumeMounts:
-            - name: data
-              mountPath: /data
-            - name: litestream
-              mountPath: /etc/litestream.yml
-              subPath: vaultwarden.yml
-          envFrom:
-            - secretRef:
-                name: vaultwarden-litestream-s3
-          ports:
-            - name: metrics
-              containerPort: 9090
-          resources:
-            limits:
-              memory: "128Mi"
-              cpu: "300m"
-            requests:
-              memory: "64Mi"
-              cpu: "100m"
-
-  volumeClaimTemplates:
-    - metadata:
-        name: data
-      spec:
-        storageClassName: nfs-client
-        accessModes: [ "ReadWriteOnce" ]
-        resources:
-          requests:
-            storage: 1Gi
+                name: vaultwarden
+          ports:
+            - containerPort: 80
+              name: web
+          volumeMounts:
+            - name: data
+              mountPath: /data
+            - name: config
+              mountPath: /data/config.json
+              subPath: vaultwarden.json
 ---
 apiVersion: v1
 kind: Service
@@ -99,31 +55,13 @@ spec:
     - port: 80
       targetPort: 80
       name: web
-    - port: 9090
-      targetPort: 9090
-      name: metrics
----
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  name: vaultwarden
-spec:
-  selector:
-    matchLabels:
-      app: vaultwarden
-  endpoints:
-    - port: metrics
-      interval: 30s
 ---
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
   name: vaultwarden
   annotations:
-    cert-manager.io/issuer: "le-issuer"
-    nginx.ingress.kubernetes.io/configuration-snippet: |
-      more_set_headers "X-Forwarded-For $http_x_forwarded_for";
-    namespace: default
+    cert-manager.io/cluster-issuer: "le-issuer"
 spec:
   tls:
     - hosts:
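With the litestream sidecar gone and /data switched to an emptyDir, the vaultwarden pod keeps no durable local state, which only holds up if the `vaultwarden` secret points the server at external storage. A sketch of the assumed shape — DATABASE_URL is vaultwarden's standard env var, and the Postgres target mirrors the rest of this diff:

    apiVersion: v1
    kind: Secret
    metadata:
      name: vaultwarden
      namespace: vaultwarden  # namespace assumed from vaultwarden.vaultwarden.svc above
    type: Opaque
    stringData:
      DATABASE_URL: postgresql://vaultwarden:<password>@192.168.50.236:5432/vaultwarden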
diff --git a/homelab/vrchat-prometheus-exporter.nix b/homelab/vrchat-prometheus-exporter.nix
index 0245665..2bbe0fb 100644
--- a/homelab/vrchat-prometheus-exporter.nix
+++ b/homelab/vrchat-prometheus-exporter.nix
@@ -3,6 +3,7 @@ let
   appImage = "git.gmem.ca/arch/vrchat-prometheus-adapter:arm";
 in {
   kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.vrchat-prometheus-adapter = {
+    metadata.namespace = "vrchat";
     spec = {
       selector.matchLabels.app = appName;
       endpoints = [
@@ -14,6 +15,7 @@ in {
     };
   };
   kubernetes.resources.services.vrchat-prometheus-adapter = {
+    metadata.namespace = "vrchat";
     metadata.labels.app = appName;
     spec = {
       selector.app = appName;
@@ -23,35 +25,38 @@ in {
       };
     };
   };
-  kubernetes.resources.deployments.vrchat-prometheus-adapter.spec = {
-    selector.matchLabels.app = appName;
-    template = {
-      metadata.labels.app = appName;
-      spec = {
-        volumes = {
-          config.configMap.name = "vrchat-prometheus-adapter";
-        };
-        containers = {
-          vrchat-prometheus-adapter = {
-            image = appImage;
-            imagePullPolicy = "Always";
-            volumeMounts = [
-              {
-                name = "config";
-                mountPath = "/config.toml";
-                subPath = "config.toml";
-              }
-            ];
-            envFrom = [{secretRef.name = "vrchat-prometheus-adapter";}];
-            ports.metrics.containerPort = 6534;
-            resources = {
-              requests = {
-                cpu = "50m";
-                memory = "32Mi";
-              };
-              limits = {
-                cpu = "500m";
-                memory = "256Mi";
+  kubernetes.resources.deployments.vrchat-prometheus-adapter = {
+    metadata.namespace = "vrchat";
+    spec = {
+      selector.matchLabels.app = appName;
+      template = {
+        metadata.labels.app = appName;
+        spec = {
+          volumes = {
+            config.configMap.name = "vrchat-prometheus-adapter";
+          };
+          containers = {
+            vrchat-prometheus-adapter = {
+              image = appImage;
+              imagePullPolicy = "Always";
+              volumeMounts = [
+                {
+                  name = "config";
+                  mountPath = "/config.toml";
+                  subPath = "config.toml";
+                }
+              ];
+              envFrom = [{secretRef.name = "vrchat-prometheus-adapter";}];
+              ports.metrics.containerPort = 6534;
+              resources = {
+                requests = {
+                  cpu = "50m";
+                  memory = "32Mi";
+                };
+                limits = {
+                  cpu = "500m";
+                  memory = "256Mi";
+                };
              };
            };
          };