Transition from Nix to YAML for Kubernetes manifests
All checks were successful: Lint / lint (push) successful in 36s

Gabriel Simmer 2024-07-05 17:24:10 +01:00
parent 09cf8c226b
commit 35375f6272
Signed by: arch
SSH key fingerprint: SHA256:m3OEcdtrnBpMX+2BDGh/byv3hrCekCLzDYMdvGEKPPQ
162 changed files with 2955 additions and 3563 deletions

View file

@@ -45,7 +45,6 @@
terranix,
alertmanager-ntfy,
nixpkgs-wayland,
kubenix,
nixos-dns,
nixos-hardware,
emacs-overlay,
@@ -129,18 +128,6 @@
((pkgs.callPackage ./dns/nextdns.nix) {}).data
)
);
kubernetes =
(kubenix.evalModules.x86_64-linux {
module = {kubenix, ...}: {
imports = [
kubenix.modules.k8s
./homelab/kubernetes.nix
];
};
})
.config
.kubernetes
.result;
dns = generate.octodnsConfig {
inherit dnsConfig;
config = {
@@ -229,18 +216,6 @@
])}/bin/octodns-sync --config-file ${self.packages.x86_64-linux.dns} --doit
'');
};
kube-apply = {
type = "app";
program = toString (pkgs.writers.writeBash "diff" ''
${pkgs.kubectl}/bin/kubectl apply -f ${self.packages.x86_64-linux.kubernetes}
'');
};
kube-diff = {
type = "app";
program = toString (pkgs.writers.writeBash "diff" ''
${pkgs.kubectl}/bin/kubectl diff -f ${self.packages.x86_64-linux.kubernetes}
'');
};
tf-plan = {
type = "app";
program = toString (pkgs.writers.writeBash "plan" ''

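The kube-apply and kube-diff apps removed above have no direct replacement in this diff. Assuming the new YAML manifests live together in one directory, plain kubectl covers both jobs (kubectl diff -f <dir> and kubectl apply -f <dir>); a kustomization makes that explicit. The sketch below is one conventional way to wire it up, with illustrative file names, not something this commit is shown to contain:

# kustomization.yaml (hypothetical): glues the checked-in manifests together
# so that `kubectl diff -k .` and `kubectl apply -k .` take over the role of
# the deleted kube-diff and kube-apply apps.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - registry.yaml
  - grocy.yaml
  - promtail.yaml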
View file

@@ -1,97 +0,0 @@
let
appName = "changedetection";
changedetection-Image = "dgtlmoon/changedetection.io:latest";
browserless-Image = "browserless/chrome:latest";
in
{...}: {
kubernetes.resources.services.changedetection = {
spec = {
selector.app = appName;
ports.http = {
port = 5000;
targetPort = 5000;
};
};
};
kubernetes.resources.statefulSets.changedetection.spec = {
selector.matchLabels.app = appName;
serviceName = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = appName;
};
containers = {
changedetection = {
image = changedetection-Image;
imagePullPolicy = "Always";
ports.http.containerPort = 5000;
env = [
{
name = "PLAYWRIGHT_DRIVER_URL";
value = "ws://localhost:3000";
}
];
volumeMounts = [
{
name = "data";
mountPath = "/datastore";
}
];
};
browserless = {
image = browserless-Image;
imagePullPolicy = "Always";
ports.webdriver.containerPort = 3000;
resources = {
requests.memory = "768Mi";
limits.memory = "2Gi";
};
};
};
};
};
volumeClaimTemplates = [
{
metadata.name = "data";
spec = {
storageClassName = "nfs-client";
accessModes = ["ReadWriteOnce"];
resources.requests.storage = "1Gi";
};
}
];
};
kubernetes.resources.ingresses.changedetection = {
metadata = {
name = appName;
annotations = {
"cert-manager.io/issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["changedetect.gmem.ca"];
secretName = "gmem-ca-wildcard";
}
];
rules = [
{
host = "changedetect.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
}
];
}
];
};
};
}
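The replacement YAML for this file isn't shown in this excerpt, but the mapping is mechanical; for instance, the deleted kubenix Service above renders to:

apiVersion: v1
kind: Service
metadata:
  name: changedetection
spec:
  selector:
    app: changedetection
  ports:
    - name: http
      port: 5000
      targetPort: 5000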

View file

@@ -1,24 +0,0 @@
{
lib,
config,
kubenix,
...
}: {
kubernetes.helm.releases.cloudflare-exporter = {
namespace = "cloudflare";
chart = kubenix.lib.helm.fetch {
repo = "https://lablabs.github.io/cloudflare-exporter";
chart = "cloudflare-exporter";
version = "0.1.9";
sha256 = "sha256-ZTyE6I3vV9tjKRRc84EvoqboS01SPKVb74jYN8prnfA=";
};
values = {
image.tag = "0.0.14";
secretRef = "cloudflare-exporter";
serviceMonitor = {
enabled = true;
labels.release = "prometheus";
};
};
};
}

View file

@@ -1,111 +0,0 @@
let
appName = "conduwuit";
conduwuit-Image = "git.gmem.ca/arch/conduwuit:latest";
in
{...}: {
kubernetes.resources.services.conduwuit = {
spec = {
selector.app = appName;
ports.http = {
port = 6167;
targetPort = 6167;
};
};
};
kubernetes.resources.statefulSets.conduwuit.spec = {
selector.matchLabels.app = appName;
serviceName = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = appName;
};
containers = {
conduwuit = {
image = conduwuit-Image;
imagePullPolicy = "Always";
ports.http.containerPort = 6167;
volumeMounts = [
{
name = "data";
mountPath = "/var/lib/matrix-conduit";
}
{
name = "config";
mountPath = "/etc/matrix-conduit/conduit.toml";
subPath = "conduit.toml";
}
];
env.CONDUIT_CONFIG.value = "/etc/matrix-conduit/conduit.toml";
};
};
};
};
volumeClaimTemplates = [
{
metadata.name = "data";
spec = {
storageClassName = "nfs-client";
accessModes = ["ReadWriteOnce"];
resources.requests.storage = "5Gi";
};
}
];
};
kubernetes.resources.ingresses.conduwuit = {
metadata = {
name = appName;
annotations = {
"cert-manager.io/issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["chat.gmem.ca"];
secretName = "gmem-ca-wildcard";
}
];
rules = [
{
host = "chat.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
}
];
}
];
};
};
kubernetes.resources.configMaps.conduwuit = {
metadata = {
name = appName;
annotations = {
"cert-manager.io/issuer" = "le-issuer";
};
};
data."conduit.toml" = ''
[global]
# The Conduit server needs all /_matrix/ requests to be reachable at
# https://your.server.name/ on port 443 (client-server) and 8448 (federation).
server_name = "gmem.ca"
# This is the only directory where Conduit will save its data
database_path = "/var/lib/matrix-conduit/"
database_backend = "rocksdb"
port = 6167
max_request_size = 20_000_000 # in bytes
allow_federation = true
allow_check_for_updates = false
trusted_servers = ["matrix.org"]
address = "0.0.0.0"
'';
};
}
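As above, the replacement YAML isn't shown in this excerpt; the deleted Ingress would render along these lines:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: conduwuit
  annotations:
    cert-manager.io/issuer: le-issuer
spec:
  tls:
    - hosts:
        - chat.gmem.ca
      secretName: gmem-ca-wildcard
  rules:
    - host: chat.gmem.ca
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: conduwuit
                port:
                  name: http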

View file

@@ -1,32 +0,0 @@
apiVersion: v1
data:
ADMIN_ROLE_ID: "449317401482231808"
API_CACHE_ENTRIES_LIMIT: "100"
API_CACHE_EXPIRATION_IN_SECONDS: "10800"
API_CACHE_REVALIDATION_WINDOW_IN_SECONDS: "300"
AWAIT_MESSAGE_TIMEOUT: "600"
CACHE_REVALIDATION_IN_SECONDS: "5"
FINAL_CACHE_EXPIRATION_IN_SECONDS: "30"
HELPFUL_ROLE_EXEMPT_ID: "734887151056978025"
HELPFUL_ROLE_ID: "725027785424240671"
HELPFUL_ROLE_POINT_THRESHOLD: "40"
INTRO_CHANNEL: "561171851556945920"
INTRO_ROLE: "992891679579324498"
JOB_POSTINGS_CHANNEL: "598513460019462144"
JOIN_LOG_CHANNEL: "452597959066910724"
MINIMAL_AMOUNT_OF_WORDS: "5"
MINIMAL_COMPENSATION: "15.00"
MOD_CHANNEL: "482153306567737345"
MOD_ROLE_ID: "465222496891699200"
NEW_USER_ROLE: "992891582334382230"
NUMBER_OF_ALLOWED_MESSAGES: "5"
ONBOARDING_CHANNEL: "963568922026717214"
POINT_DECAY_TIMER: "24"
POINT_LIMITER_IN_MINUTES: "30"
POST_LIMITER_IN_HOURS: "1"
REPO_LINK: https://github.com/r-webdev/webdev-support-bot
VAR_DETECT_LIMIT: "7200000"
kind: ConfigMap
metadata:
name: webdev-support-bot
namespace: default

View file

@@ -1,11 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: art-by-couch-couchdb
spec:
selector:
matchLabels:
app: couchdb
endpoints:
- port: metrics
interval: 30s

View file

@@ -1,37 +0,0 @@
couchdbConfig:
couchdb:
uuid: 25274915ac5d403292fef27909e679cc
chttpd:
enable_cors: true
cors:
origins: "https://artbybecki.com, https://admin.artbybecki.com"
clusterSize: 1
persistentVolume:
enabled: true
size: 5Gi
storageClass: local-path
image:
tag: 3.3.1
ingress:
enabled: true
hosts:
- couch.artbybecki.com
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/proxy-body-size: 10m
tls:
- secretName: couch-tls
hosts:
- couch.artbybecki.com
autoSetup:
enabled: true
image:
repository: curlimages/curl
prometheusPort:
enabled: true
bind_address: "0.0.0.0"
port: 17986

View file

@@ -1,30 +0,0 @@
{lib, ...}: {
kubernetes.resourceOrder = [
"CustomResourceDefinition"
"Namespace"
"ConfigMap"
];
kubernetes.customTypes = [
{
name = "servicemonitors";
attrName = "servicemonitor";
group = "monitoring.coreos.com";
kind = "ServiceMonitor";
version = "v1";
module = {
options.endpoints = lib.mkOption {
description = "Endpoints";
type = lib.types.list;
};
};
}
{
name = "infisicalsecrets";
attrName = "infisicalsecret";
group = "secrets.infisical.com";
kind = "InfisicalSecret";
version = "v1alpha1";
}
];
}
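This registration existed only so kubenix could type-check non-core resources; plain YAML needs nothing equivalent. A custom resource is just apiVersion plus kind, e.g. the miniflux ServiceMonitor defined later in this diff, rendered out:

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: miniflux
  namespace: miniflux
spec:
  selector:
    matchLabels:
      app: miniflux
  endpoints:
    - port: http
      interval: 60s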

View file

@@ -1,68 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: dref
namespace: default
spec:
selector:
matchLabels:
app: dref
template:
metadata:
labels:
app: dref
spec:
nodeSelector:
kubernetes.io/arch: arm64
containers:
- name: dref
image: icr.gmem.ca/dref
resources:
limits:
memory: "32Mi"
cpu: "100m"
requests:
memory: "1Mi"
cpu: "1m"
ports:
- containerPort: 80
env:
- name: DREF_REGISTRY
value: registry:5000
- name: DREF_REGISTRY_USE_SSL
value: "false"
---
apiVersion: v1
kind: Service
metadata:
name: dref
spec:
selector:
app: dref
ports:
- port: 3000
targetPort: 3000
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: dref
annotations:
cert-manager.io/issuer: "le-issuer"
namespace: default
spec:
tls:
- hosts:
- dref.gmem.ca
secretName: gmem-ca-wildcard
rules:
- host: dref.gmem.ca
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: dref
port:
number: 3000

View file

@@ -1,73 +0,0 @@
let
appName = "duplikate";
appImage = "git.gmem.ca/arch/duplikate:latest";
functions = import ./functions.nix {};
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.deployments.duplikate = {
metadata.namespace = "duplikate";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
duplikate = {
image = appImage;
env.REDIS_URL.value = "redis://duplikate-redis-master";
envFrom = [
{secretRef.name = "duplikate";}
];
resources = {
requests = {
cpu = "10m";
memory = "32Mi";
};
limits = {
cpu = "1";
memory = "128Mi";
};
};
};
};
};
};
};
};
kubernetes.resources."secrets.infisical.com"."v1alpha1".InfisicalSecret.duplikate = functions.secret "duplikate";
kubernetes.helm.releases.duplikate-redis = {
namespace = "duplikate";
chart = kubenix.lib.helm.fetch {
repo = "https://charts.bitnami.com/bitnami";
chart = "redis";
version = "18.6.1";
sha256 = "CyvGHc1v1BtbzDx6hbbPah2uWpUhlNIUQowephT6hmM=";
};
values = {
auth.enabled = false;
architecture = "standalone";
image = {
registry = "registry.redict.io";
repository = "redict";
tag = "7.3-compat";
digest = "sha256:91fcd3124ddb77a098ec0da93c07f99b02b178ab356fe51aa0839aaa62891208";
};
};
};
kubernetes.resources.statefulSets.duplikate-redis-master = {
metadata.namespace = "duplikate";
spec = {
template.spec.volumes.start-scripts.configMap.name = lib.mkForce "duplikate-redis-scripts-a4596108c1";
template.spec.volumes.health.configMap.name = lib.mkForce "duplikate-redis-health-05691b979f";
template.spec.volumes.config.configMap.name = lib.mkForce "duplikate-redis-configuration-4712c8e029";
};
};
}

View file

@@ -1,98 +0,0 @@
let
endpoints = {
"git" = {
location = "192.168.50.229";
host = "git.gmem.ca";
port = 443;
protocol = "HTTPS";
};
"proxmox" = {
location = "192.168.50.3";
host = "proxmox.gmem.ca";
port = 8006;
protocol = "HTTPS";
};
"austin" = {
location = "192.168.50.237";
host = "austin.gmem.ca";
port = 8080;
protocol = "HTTP";
};
"tokyo" = {
location = "192.168.50.124";
host = "tokyo.gmem.ca";
port = 8000;
protocol = "HTTP";
};
"ibiza" = {
location = "192.168.50.182";
host = "ibiza.gmem.ca";
port = 8000;
protocol = "HTTP";
};
};
in {
kubernetes.resources.services =
builtins.mapAttrs (name: endpoint: {
metadata.namespace = "endpoints";
spec = {
ports.${name} = {
port = endpoint.port;
targetPort = endpoint.port;
};
};
})
endpoints;
kubernetes.resources.endpoints =
builtins.mapAttrs (name: endpoint: {
metadata.namespace = "endpoints";
subsets = [
{
addresses = [{ip = endpoint.location;}];
ports = [
{
name = name;
port = endpoint.port;
protocol = "TCP";
}
];
}
];
})
endpoints;
kubernetes.resources.ingresses =
builtins.mapAttrs (name: endpoint: {
metadata = {
name = name;
namespace = "endpoints";
annotations = {
"nginx.ingress.kubernetes.io/proxy-body-size" = "10g";
"cert-manager.io/cluser-issuer" = "le-issuer";
"nginx.ingress.kubernetes.io/backend-protocol" = endpoint.protocol;
};
};
spec = {
tls = [
{
hosts = [endpoint.host];
}
];
rules = [
{
host = endpoint.host;
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = name;
port.number = endpoint.port;
};
}
];
}
];
};
})
endpoints;
}
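This module is the clearest loss in the move to YAML: one attrset fans out into a Service, an Endpoints object, and an Ingress per entry. Written out by hand, the Service/Endpoints pair for the git entry alone comes to this (a sketch of the generated output; the replacement files aren't shown here):

apiVersion: v1
kind: Service
metadata:
  name: git
  namespace: endpoints
spec:
  ports:
    - name: git
      port: 443
      targetPort: 443
---
apiVersion: v1
kind: Endpoints
metadata:
  name: git
  namespace: endpoints
subsets:
  - addresses:
      - ip: 192.168.50.229
    ports:
      - name: git
        port: 443
        protocol: TCP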

View file

@@ -1,146 +0,0 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: grocy
spec:
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app: grocy
serviceName: grocy
replicas: 1
template:
metadata:
labels:
app: grocy
spec:
securityContext:
fsGroup: 911
#runAsUser: 911
#runAsGroup: 911
initContainers:
- name: init-litestream
image: litestream/litestream:sha-749bc0d
args: ['restore', '-if-db-not-exists', '-if-replica-exists', '-v', '/config/data/grocy.db']
volumeMounts:
- name: config
mountPath: /config
- name: litestream
mountPath: /etc/litestream.yml
subPath: grocy.yml
envFrom:
- secretRef:
name: grocy-litestream-s3
containers:
- name: grocy
image: lscr.io/linuxserver/grocy:latest
ports:
- containerPort: 80
name: web
env:
- name: PUID
value: "911"
- name: PGID
value: "911"
volumeMounts:
- name: config
mountPath: /config
- name: litestream
image: litestream/litestream:sha-749bc0d
args: ['replicate']
volumeMounts:
- name: config
mountPath: /config
- name: litestream
mountPath: /etc/litestream.yml
subPath: grocy.yml
envFrom:
- secretRef:
name: grocy-litestream-s3
ports:
- name: metrics
containerPort: 9090
resources:
limits:
memory: "128Mi"
cpu: "300m"
requests:
memory: "64Mi"
cpu: "100m"
volumes:
- name: litestream
configMap:
name: grocy-litestream
volumeClaimTemplates:
- metadata:
name: config
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
name: grocy
annotations:
prometheus.io/port: "9090"
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics"
spec:
selector:
app: grocy
ports:
- port: 80
targetPort: 80
name: web
- port: 9090
targetPort: 9090
name: litestream-metrics
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: grocy
spec:
selector:
matchLabels:
app: grocy
endpoints:
- port: metrics
interval: 30s
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: food
annotations:
cert-manager.io/issuer: "le-issuer"
nginx.ingress.kubernetes.io/proxy-body-size: 100m
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header X-Forwarded-Proto $scheme;
namespace: default
spec:
tls:
- hosts:
- food.gmem.ca
secretName: gmem-ca-wildcard
rules:
- host: food.gmem.ca
http:
paths:
- backend:
service:
name: grocy
port:
number: 80
path: /
pathType: Prefix

View file

@@ -1,56 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: act-runner
name: act-runner
spec:
replicas: 1
selector:
matchLabels:
app: act-runner
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
app: act-runner
spec:
restartPolicy: Always
volumes:
- name: runner-data
emptyDir: {}
initContainers:
- name: runner-config-generation
image: code.forgejo.org/forgejo/runner:2.4.0
command: [ "sh", "-c", "cd /data && forgejo-runner create-runner-file --instance $GITEA_INSTANCE_URL --secret $RUNNER_SECRET --connect" ]
env:
- name: RUNNER_SECRET
valueFrom:
secretKeyRef:
name: runner-secret
key: token
- name: GITEA_INSTANCE_URL
value: https://git.gmem.ca
volumeMounts:
- name: runner-data
mountPath: /data
containers:
- name: runner
image: gitea/act_runner:nightly-dind-rootless
imagePullPolicy: Always
env:
- name: DOCKER_HOST
value: tcp://localhost:2376
- name: DOCKER_CERT_PATH
value: /certs/client
- name: DOCKER_TLS_VERIFY
value: "1"
- name: GITEA_INSTANCE_URL
value: https://git.gmem.ca
securityContext:
privileged: true
volumeMounts:
- name: runner-data
mountPath: /data

View file

@@ -1,12 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: freshrss-config
data:
CRON_MIN: "1,31"
# OIDC_ENABLED: "1"
OIDC_PROVIDER_METADATA_URL: https://authentik.gmem.ca/application/o/freshrss/.well-known/openid-configuration
OIDC_REMOTE_USER_CLAIM: preferred_username
OIDC_CLIENT_ID: WSZI1tVeDE5FhC6XF3nbmjNh3UhCcpNXll7Zf4bJ
OIDC_SCOPES: "openid profile"
OIDC_X_FORWARDED_HEADERS: X-Forwarded-Host X-Forwarded-Port X-Forwarded-Proto

View file

@@ -1,28 +0,0 @@
{ ... }: {
secret = name: {
metadata.namespace = "${name}";
spec = {
hostAPI = "http://infisical:8080";
resyncInterval = 10;
authentication = {
kubernetesAuth = {
identityId = "68d1f432-7b0a-4e4a-b439-acbbbc160f1e";
serviceAccountRef = {
name = "infisical-auth";
namespace = "infisical";
};
secretsScope = {
projectSlug = "kubernetes-homelab-dp67";
envSlug = "prod";
secretsPath = "/${name}";
};
};
};
managedSecretReference = {
secretName = "${name}";
secretNamespace = "${name}";
creationPolicy = "Owner";
};
};
};
}
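Elsewhere in this diff the helper is called as functions.secret "miniflux"; rendered to plain YAML (the resource name comes from the attribute key on the caller's side), that single call expands to roughly:

apiVersion: secrets.infisical.com/v1alpha1
kind: InfisicalSecret
metadata:
  name: miniflux
  namespace: miniflux
spec:
  hostAPI: http://infisical:8080
  resyncInterval: 10
  authentication:
    kubernetesAuth:
      identityId: 68d1f432-7b0a-4e4a-b439-acbbbc160f1e
      serviceAccountRef:
        name: infisical-auth
        namespace: infisical
      secretsScope:
        projectSlug: kubernetes-homelab-dp67
        envSlug: prod
        secretsPath: /miniflux
  managedSecretReference:
    secretName: miniflux
    secretNamespace: miniflux
    creationPolicy: Owner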

Binary file not shown.

View file

@@ -1,355 +0,0 @@
{
lib,
config,
kubenix,
...
}: let
homepage-config = {
bookmarks = [];
services = [
{
Media = [
{
Plex = {
icon = "plex.png";
href = "https://app.plex.tv";
description = "Plex";
widget = {
type = "plex";
url = "http://192.168.50.229:32400";
key = "{{HOMEPAGE_VAR_PLEX_KEY}}";
};
};
}
{
Jellyseerr = {
icon = "jellyseerr.png";
href = "https://request-media.gmem.ca";
description = "Request movies and TV shows";
widget = {
type = "jellyseerr";
url = "https://request-media.gmem.ca";
key = "{{HOMEPAGE_VAR_JELLYSEERR_KEY}}";
};
};
}
{
Transmission = {
icon = "transmission.png";
description = "Download progress for torrents";
widget = {
type = "transmission";
url = "http://192.168.50.187:9091";
};
};
}
];
}
{
"Personal Infrastructure" = [
{
authentik = {
icon = "authentik.png";
href = "https://authentik.gmem.ca";
description = "OIDC SSO";
};
}
{
Tailscale = {
icon = "tailscale.png";
href = "https://login.tailscale.com";
description = "VPN provider";
};
}
{
Git = {
icon = "forgejo.png";
href = "https://git.gmem.ca";
description = "Git forge";
};
}
{
Grafana = {
icon = "grafana.png";
href = "https://grafana.gmem.ca";
description = "Monitoring & metrics";
widget = {
type = "grafana";
url = "https://grafana.gmem.ca";
username = "api@localhost";
password = "{{HOMEPAGE_VAR_GRAFANA_PASSWORD}}";
};
};
}
{
NextDNS = {
icon = "nextdns.png";
href = "https://my.nextdns.io/bcee89/setup";
description = "DNS provider";
widget = {
type = "nextdns";
profile = "bcee89";
key = "{{HOMEPAGE_VAR_NEXTDNS_KEY}}";
};
};
}
{
"Proxmox" = {
icon = "proxmox.png";
href = "https://proxmox.gmem.ca";
description = "Homelab proxmox";
widget = {
type = "proxmox";
url = "https://proxmox.gmem.ca";
username = "api@pam!homepage";
password = "{{HOMEPAGE_VAR_PROXMOX_PASSWORD}}";
};
};
}
{
"Immich" = {
icon = "immich.png";
href = "https://photos.gmem.ca";
description = "Image hosting";
widget = {
type = "immich";
url = "https://photos.gmem.ca";
key = "{{HOMEPAGE_VAR_IMMICH_KEY}}";
};
};
}
{
"NextDNS Tailscale" = {
icon = "nextdns.png";
href = "https://my.nextdns.io/74c6db/setup";
description = "Tailnet DNS provider";
widget = {
type = "nextdns";
profile = "74c6db";
key = "{{HOMEPAGE_VAR_NEXTDNS_KEY}}";
};
};
}
{
"Paperless-ngx" = {
icon = "paperless-ngx.png";
href = "https://docs.gmem.ca";
description = "Document storage and indexing";
};
}
];
}
{
Reading = [
{
miniflux = {
icon = "miniflux.png";
href = "https://rss.gmem.ca";
description = "Miniflux RSS Reader";
};
}
{
"Lobste.rs" = {
href = "https://lobste.rs";
description = "News aggregator";
};
}
{
"Hacker News" = {
href = "https://news.ycombinator.com";
description = "VC news aggregator";
};
}
];
}
{
"Floofy.tech Infrastructure" = [
{
Mastodon = {
icon = "mastodon.png";
href = "https://floofy.tech";
description = "Primary Mastodon instance";
widget = {
type = "mastodon";
url = "https://floofy.tech";
};
};
}
{
Grafana = {
icon = "grafana.png";
href = "https://grafana.services.floofy.tech";
description = "Metrics and Monitoring";
};
}
{
vSphere = {
icon = "vmware-esxi.png";
href = "https://vcenter.services.floofy.tech";
description = "Hypervisor Manager";
};
}
{
"vrclub.social" = {
icon = "calckey.png";
href = "https://vrclub.social";
description = "Firefish instance for VR clubs";
};
}
];
}
{
"Tools" = [
{
"IT Tools" = {
icon = "it-tools.png";
href = "https://tools.gmem.ca";
description = "Various useful tools";
};
}
{
Cyberchef = {
icon = "cyberchef.png";
href = "https://gchq.github.io/CyberChef/";
description = "More useful tools, mostly text manipulation";
};
}
];
}
{
"Backup Status" = [
{
"gsimmer backups" = {
icon = "healthchecks.png";
href = "https://healthchecks.gmem.ca";
description = "Uptime monitor for recurring tasks";
widget = {
type = "healthchecks";
url = "https://healthchecks.gmem.ca";
key = "{{HOMEPAGE_VAR_HEALTHCHECKS_KEY}}";
uuid = "617d460f-69f6-444f-852a-421861543327";
};
};
}
{
"becki backups" = {
icon = "healthchecks.png";
href = "https://healthchecks.gmem.ca";
description = "Uptime monitor for recurring tasks";
widget = {
type = "healthchecks";
url = "https://healthchecks.gmem.ca";
key = "{{HOMEPAGE_VAR_HEALTHCHECKS_KEY}}";
uuid = "9d01d3dd-2a56-4c70-9b5c-9cb99a1466db";
};
};
}
{
"apps backups" = {
icon = "healthchecks.png";
href = "https://healthchecks.gmem.ca";
description = "Uptime monitor for recurring tasks";
widget = {
type = "healthchecks";
url = "https://healthchecks.gmem.ca";
key = "{{HOMEPAGE_VAR_HEALTHCHECKS_KEY}}";
uuid = "37a854b0-9191-4452-aa30-df3969d59b09";
};
};
}
];
}
];
settings = {
title = "Arch's Homepage";
providers.openweathermap = "{{HOMEPAGE_VAR_WEATHER_KEY}}";
background = {
image = "https://images.unsplash.com/photo-1502790671504-542ad42d5189?auto=format&fit=crop&w=2560&q=80";
blur = "sm";
opacity = 50;
};
base = "https://home.gmem.ca";
layout.Media.style = "row";
layout.Media.columns = "3";
layout."Personal Infrastructure".style = "row";
layout."Personal Infrastructure".columns = "3";
layout."Backup Status".style = "row";
layout."Backup Status".columns = "3";
};
kubernetes.mode = "cluster";
widgets = [
{
logo.icon = "https://gmem.ca/avatar.png";
}
{
kubernetes = {
cluster.show = true;
nodes.show = true;
};
}
{
search = {
provider = "duckduckgo";
};
}
{
openweathermap = {
provider = "openweathermap";
cache = 5;
units = "metric";
};
}
];
};
in {
kubernetes.helm.releases.homepage = {
namespace = "homepage";
chart = kubenix.lib.helm.fetch {
repo = "https://jameswynn.github.io/helm-charts";
chart = "homepage";
version = "1.2.3";
sha256 = "sha256-tZv/+ePFPifilp8wU4FjogSJhNEERx0PcdHUzsBxfRA=";
};
# arbitrary attrset passed as values to the helm release
values = {
replicaCount = 2;
image = {
repository = "ghcr.io/gethomepage/homepage";
tag = "latest";
};
serviceAccount.create = true;
enableRbac = true;
config = homepage-config;
ingress.main = {
enabled = true;
ingressClassName = "nginx";
hosts = [
{
host = "home.gmem.ca";
paths = [
{
path = "/";
pathType = "Prefix";
}
];
}
];
tls = [
{
hosts = ["home.gmem.ca"];
secretName = "gmem-ca-wildcard";
}
];
};
};
};
kubernetes.resources.deployments.homepage = {
metadata.namespace = "homepage";
spec.template = {
metadata.annotations."gmem.ca/homepage-config-hash" = builtins.hashString "md5" (builtins.toJSON homepage-config);
spec.containers.homepage.envFrom = [{secretRef.name = "homepage-config";}];
};
};
}
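One kubenix nicety goes away with this file: the deployment patch above hashes the homepage config into a pod-template annotation, so any config change rolls the pods automatically. Static YAML can carry the same annotation, but the hash has to be recomputed out of band (by CI or by hand) whenever the config changes; the relevant excerpt of the Deployment would look like:

spec:
  template:
    metadata:
      annotations:
        # placeholder: recompute whenever the homepage config changes
        gmem.ca/homepage-config-hash: "<md5-of-config-json>"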

View file

@@ -1,67 +0,0 @@
{
lib,
config,
kubenix,
...
}: {
kubernetes.helm.releases.immich = {
namespace = "immich";
chart = kubenix.lib.helm.fetch {
repo = "https://immich-app.github.io/immich-charts";
chart = "immich";
version = "0.6.0";
sha256 = "p9fgqRMxRJ2rMBZZfMKuAIjp/N1/KgKCKLDhoXO0O6c=";
};
# arbitrary attrset passed as values to the helm release
values = {
image.tag = "v1.105.1";
machine-learning.enabled = false;
immich.persistence.library.existingClaim = "immich";
redis = {
enabled = true;
};
env = {
PGSSLMODE = "no-verify";
DB_PASSWORD.valueFrom.secretKeyRef = {
name = "postgres-immich";
key = "password";
};
DB_HOSTNAME.value = "192.168.50.236";
};
server.ingress.main = {
enabled = true;
annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
tls = [
{
hosts = ["photos.gmem.ca"];
}
];
hosts = [
{
host = "photos.gmem.ca";
paths = [{path = "/";}];
}
];
};
};
};
kubernetes.resources.persistentVolumeClaims.immich = {
metadata = {
name = "immich";
namespace = "immich";
};
spec = {
accessModes = ["ReadWriteOnce"];
resources.requests.storage = "50Gi";
};
};
kubernetes.resources.statefulSets.immich-redis-master = {
metadata.namespace = "immich";
spec.template.spec.containers.redis.image = lib.mkForce "registry.redict.io/redict:7.3-compat";
};
}

View file

@@ -1,163 +0,0 @@
let
appName = "soju";
sojuImage = "git.gmem.ca/arch/soju:latest";
gamjaImage = "git.gmem.ca/arch/gamja:latest";
in {
lib,
config,
kubenix,
...
}: {
kubernetes.resources.services.soju = {
metadata.namespace = "irc";
spec = {
type = "NodePort";
selector.app = appName;
ports.tls = {
port = 6697;
targetPort = 6697;
nodePort = 6697;
};
};
};
kubernetes.resources.services.soju-ws = {
metadata.namespace = "irc";
spec = {
selector.app = appName;
ports.ws = {
port = 80;
targetPort = 80;
};
};
};
kubernetes.resources.services.gamja = {
metadata.namespace = "irc";
spec = {
selector.app = "gamja";
ports.http = {
port = 80;
targetPort = 80;
};
};
};
kubernetes.resources.deployments.soju = {
metadata.namespace = "irc";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = config.kubernetes.resources.configMaps.soju.metadata.name;
ssl.secret.secretName = "irc-gmem-ca";
};
containers = {
soju = {
image = sojuImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/etc/soju/config";
subPath = "config";
}
{
name = "ssl";
mountPath = "/ssl";
}
];
ports.tls.containerPort = 6697;
ports.ws.containerPort = 80;
env.PGHOST.value = "192.168.50.236";
env.PGPASSWORD.valueFrom.secretKeyRef = {
name = "postgres-soju";
key = "password";
};
env.PGUSER.valueFrom.secretKeyRef = {
name = "postgres-soju";
key = "user";
};
env.PGDATABASE.valueFrom.secretKeyRef = {
name = "postgres-soju";
key = "dbname";
};
};
};
};
};
};
};
kubernetes.resources.deployments.gamja = {
metadata.namespace = "irc";
spec = {
selector.matchLabels.app = "gamja";
template = {
metadata.labels.app = "gamja";
spec = {
containers = {
gamja = {
image = gamjaImage;
imagePullPolicy = "Always";
ports.http.containerPort = 80;
};
};
};
};
};
};
kubernetes.resources.ingresses.irc = {
metadata.namespace = "irc";
metadata.annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
"nginx.ingress.kubernetes.io/proxy-read-timeout" = "3600";
"nginx.ingress.kubernetes.io/proxy-send-timeout" = "3600";
};
spec = {
tls = [
{
hosts = ["irc.gmem.ca"];
}
];
rules = [
{
host = "irc.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = "gamja";
port.number = 80;
};
}
{
path = "/socket";
pathType = "Prefix";
backend.service = {
name = "soju-ws";
port.number = 80;
};
}
];
}
];
};
};
kubernetes.resources.configMaps.soju = {
metadata.namespace = "irc";
data.config = ''
listen ircs://
listen unix+admin:///app/admin
listen ws+insecure://
listen http+prometheus://localhost:9090
hostname irc.gmem.ca
title irc.gmem.ca
db postgres "dbname=soju"
message-store db
tls /ssl/tls.crt /ssl/tls.key
'';
};
}

View file

@@ -1,35 +0,0 @@
{
lib,
config,
kubenix,
...
}: {
imports = [
kubenix.modules.k8s
kubenix.modules.helm
(import ./custom.nix)
(import ./nginx.nix)
(import ./tclip.nix)
(import ./vrchat-prometheus-exporter.nix)
(import ./overseerr.nix)
(import ./immich.nix)
(import ./endpoints.nix)
(import ./homepage.nix)
(import ./cloudflare-exporter.nix)
(import ./piped.nix)
# (import ./conduit.nix)
(import ./irc.nix)
# (import ./netboot.nix)
(import ./nitter.nix)
# (import ./changedetection.nix)
(import ./nextdns-exporter.nix)
(import ./nitter-bot.nix)
(import ./miniflux.nix)
# (import ./snikket.nix)
(import ./metube.nix)
(import ./searxng.nix)
(import ./redlib.nix)
(import ./minecraft-invites.nix)
(import ./duplikate.nix)
];
}

View file

@@ -1,63 +0,0 @@
let
appName = "metube";
appImage = "ghcr.io/alexta69/metube";
in {
kubernetes.resources.services.metube = {
metadata.namespace = "metube";
spec = {
selector.app = "metube";
ports.http = {
port = 8081;
targetPort = 8081;
};
};
};
kubernetes.resources.deployments.metube = {
metadata.namespace = "metube";
spec = {
selector.matchLabels.app = "metube";
template = {
metadata.labels.app = "metube";
spec = {
containers = {
metube = {
image = appImage;
imagePullPolicy = "Always";
ports.http.containerPort = 8081;
};
};
};
};
};
};
kubernetes.resources.ingresses.metube = {
metadata.namespace = "metube";
metadata.annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
spec = {
tls = [
{
hosts = ["metube.gmem.ca"];
}
];
rules = [
{
host = "metube.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = "metube";
port.number = 8081;
};
}
];
}
];
};
};
}

View file

@@ -1,125 +0,0 @@
let
appName = "whitelistmanager";
appImage = "git.gmem.ca/arch/whitelistmanager";
frontendImage = "git.gmem.ca/arch/whitelistmanager-frontend";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.deployments.whitelistmanager = {
metadata.namespace = "minecraft-invites";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
whitelistmanager = {
image = appImage;
envFrom = [{secretRef.name = "whitelistmanager";}];
resources = {
requests = {
cpu = "1m";
memory = "256Mi";
};
limits = {
cpu = "1";
memory = "512Mi";
};
};
ports.http.containerPort = 8080;
};
};
};
};
};
};
kubernetes.resources.deployments.whitelistmanager-frontend = {
metadata.namespace = "minecraft-invites";
spec = {
selector.matchLabels.app = appName + "-frontend";
template = {
metadata.labels.app = appName + "-frontend";
spec = {
containers = {
whitelistmanager = {
image = frontendImage;
resources = {
requests = {
cpu = "1m";
memory = "256Mi";
};
limits = {
cpu = "1";
memory = "512Mi";
};
};
ports.http.containerPort = 3000;
};
};
};
};
};
};
kubernetes.resources.services.whitelistmanager = {
metadata.namespace = "minecraft-invites";
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
};
};
};
kubernetes.resources.services.whitelistmanager-frontend = {
metadata.namespace = "minecraft-invites";
metadata.labels.app = appName + "-frontend";
spec = {
selector.app = appName + "-frontend";
ports.http = {
port = 3000;
targetPort = 3000;
};
};
};
kubernetes.resources.ingresses.whitelistmanager = {
metadata.namespace = "minecraft-invites";
metadata.annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
spec = {
tls = [
{
hosts = ["minecraft-invites.gmem.ca"];
}
];
rules = [
{
host = "minecraft-invites.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = "whitelistmanager-frontend";
port.number = 3000;
};
}
{
path = "/api";
pathType = "Prefix";
backend.service = {
name = "whitelistmanager";
port.number = 8080;
};
}
];
}
];
};
};
}

View file

@@ -1,115 +0,0 @@
let
appName = "miniflux";
appImage = "docker.io/miniflux/miniflux";
functions = import ./functions.nix {};
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.deployments.miniflux = {
metadata.namespace = "miniflux";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
miniflux = {
image = appImage;
envFrom = [
{secretRef.name = "miniflux";}
{configMapRef.name = config.kubernetes.resources.configMaps.miniflux.metadata.name;}
];
resources = {
requests = {
cpu = "1m";
memory = "256Mi";
};
limits = {
cpu = "1";
memory = "512Mi";
};
};
ports.http.containerPort = 8080;
};
};
};
};
};
};
kubernetes.resources.services.miniflux = {
metadata.namespace = "miniflux";
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
};
};
};
kubernetes.resources.ingresses.miniflux = {
metadata.namespace = "miniflux";
metadata.annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
spec = {
tls = [
{
hosts = ["rss.gmem.ca"];
}
];
rules = [
{
host = "rss.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = "miniflux";
port.number = 8080;
};
}
];
}
];
};
};
kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.miniflux = {
metadata.namespace = "miniflux";
spec = {
selector.matchLabels.app = appName;
endpoints = [
{
port = "http";
interval = "60s";
}
];
};
};
kubernetes.resources.configMaps.miniflux = {
metadata.namespace = "miniflux";
data = {
CLEANUP_ARCHIVE_UNREAD_DAYS = "60";
METRICS_COLLECTOR = "1";
METRICS_ALLOWED_NETWORKS = "0.0.0.0/0";
BASE_URL = "https://rss.gmem.ca/";
RUN_MIGRATIONS = "1";
CREATE_ADMIN = "1";
OAUTH2_PROVIDER = "oidc";
OAUTH2_REDIRECT_URL = "https://rss.gmem.ca/oauth2/oidc/callback";
OAUTH2_OIDC_DISCOVERY_ENDPOINT = "https://authentik.gmem.ca/application/o/miniflux/";
OAUTH2_USER_CREATION = "1";
YOUTUBE_EMBED_URL_OVERRIDE = "https://piped.gmem.ca/embed/";
};
};
kubernetes.resources."secrets.infisical.com"."v1alpha1".InfisicalSecret.miniflux = functions.secret "miniflux";
}

View file

@@ -1,121 +0,0 @@
let
appName = "netbootxyz";
netbootxyzImage = "ghcr.io/netbootxyz/netbootxyz";
in {
kubernetes.resources.services.netbootxyz = {
spec = {
selector.app = appName;
ports.http = {
port = 80;
targetPort = 80;
};
ports.interface = {
port = 3000;
targetPort = 3000;
};
};
};
kubernetes.resources.services.netbootxyz-tftp = {
spec = {
externalTrafficPolicy = "Local";
sessionAffinity = "None";
type = "NodePort";
selector.app = appName;
ports.tftp = {
port = 69;
protocol = "UDP";
targetPort = 69;
};
};
};
kubernetes.resources.deployments.netbootxyz.spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = [
{
name = "config";
persistentVolumeClaim.claimName = "netbootxyz-config";
}
{
name = "assets";
persistentVolumeClaim.claimName = "netbootxyz-assets";
}
];
containers = {
netbootxyz = {
image = netbootxyzImage;
imagePullPolicy = "Always";
volumeMounts = [
{
mountPath = "/config";
name = "config";
}
{
mountPath = "/assets";
name = "assets";
}
];
env.SUBFOLDER.value = "/ui/";
ports.http.containerPort = 80;
ports.interface.containerPort = 3000;
ports.tftp = {
containerPort = 69;
protocol = "UDP";
};
};
};
};
};
};
kubernetes.resources.persistentVolumeClaims.netbootxyz-config.spec = {
resources.requests.storage = "1Gi";
volumeMode = "Filesystem";
accessModes = ["ReadWriteMany"];
};
kubernetes.resources.persistentVolumeClaims.netbootxyz-assets.spec = {
resources.requests.storage = "10Gi";
volumeMode = "Filesystem";
accessModes = ["ReadWriteMany"];
};
kubernetes.resources.ingresses.netbootxyz = {
metadata.annotations = {
"cert-manager.io/issuer" = "le-issuer";
"nginx.ingress.kubernetes.io/ssl-redirect" = "false";
};
spec = {
tls = [
{
hosts = ["netboot.gmem.ca"];
secretName = "gmem-ca-wildcard";
}
];
rules = [
{
host = "netboot.gmem.ca";
http.paths = [
{
path = "/ui";
pathType = "Prefix";
backend.service = {
name = "netbootxyz";
port.number = 3000;
};
}
{
path = "/";
pathType = "Prefix";
backend.service = {
name = "netbootxyz";
port.number = 80;
};
}
];
}
];
};
};
}

View file

@@ -1,65 +0,0 @@
let
appName = "nextdns-exporter";
nextdns-exporterImage = "ghcr.io/raylas/nextdns-exporter:0.5.3";
in {
kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.nextdns-exporter = {
metadata.namespace = "prometheus";
metadata.labels.app = appName;
spec = {
selector.matchLabels.app = appName;
endpoints = [
{
port = "metrics";
interval = "30s";
}
{
port = "ts-metrics";
interval = "30s";
}
];
};
};
kubernetes.resources.services.nextdns-exporter-metrics = {
metadata.namespace = "prometheus";
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.metrics = {
port = 9948;
targetPort = 9948;
};
ports.ts-metrics = {
port = 9949;
targetPort = 9949;
};
};
};
kubernetes.resources.deployments.nextdns-exporter = {
metadata.namespace = "prometheus";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
nextdns-exporter = {
image = nextdns-exporterImage;
imagePullPolicy = "Always";
ports.metrics.containerPort = 9948;
envFrom = [{secretRef.name = "nextdns-exporter";}];
};
nextdns-ts-exporter = {
image = nextdns-exporterImage;
imagePullPolicy = "Always";
ports.metrics.containerPort = 9949;
env.METRICS_PORT.value = "9949";
envFrom = [{secretRef.name = "nextdns-ts-exporter";}];
};
};
};
};
};
};
}

View file

@@ -1,42 +0,0 @@
{
lib,
config,
kubenix,
...
}: {
kubernetes.helm.releases.ingress-nginx = {
namespace = "ingress-nginx";
chart = kubenix.lib.helm.fetch {
repo = "https://kubernetes.github.io/ingress-nginx";
chart = "ingress-nginx";
version = "4.10.1";
sha256 = "BHRoXG5EtJdCGkzy52brAtEcMEZP+WkNtfBf+cwpNbs=";
};
values = {
controller = {
kind = "DaemonSet";
metrics = {
enabled = true;
serviceMonitor.enabled = true;
additionalLabels.release = "prometheus";
};
podAnnotations = {
"prometheus.io/scrape" = "true";
"prometheus.io/port" = "10254";
};
tolerations = [
{
key = "node-role.kubernetes.io/control-plane";
effect = "NoSchedule";
}
];
ingressClassResource.default = true;
publishService.enabled = true;
service.type = "NodePort";
service.externalTrafficPolicy = "Local";
hostNetwork = true;
extraArgs.default-ssl-certificate = "cert-manager/gmem-ca-wildcard";
};
};
};
}

View file

@@ -1,50 +0,0 @@
let
appName = "nitter-bot";
appImage = "git.gmem.ca/arch/nitter-bot:latest";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.statefulSets.nitter-bot = {
metadata.namespace = "nitter";
spec = {
selector.matchLabels.app = appName;
serviceName = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
nitter-bot = {
image = appImage;
envFrom = [
{secretRef.name = "nitter-bot";}
{configMapRef.name = config.kubernetes.resources.configMaps.nitter-bot.metadata.name;}
];
resources = {
requests = {
cpu = "1m";
memory = "32Mi";
};
limits = {
cpu = "1";
memory = "128Mi";
};
};
};
};
};
};
};
};
kubernetes.resources.configMaps.nitter-bot = {
metadata.namespace = "nitter";
data = {
NITTER_URL = "http://nitter:8080";
NITTER_EXTERNAL_URL = "https://nitter.gmem.ca";
};
};
}

View file

@@ -1,125 +0,0 @@
let
appName = "nitter";
nitterImage = "git.gmem.ca/arch/nitter:latest";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.services.nitter = {
metadata.namespace = "nitter";
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
};
ports.readonly = {
port = 8081;
targetPort = 8081;
};
};
};
kubernetes.resources.deployments.nitter = {
metadata.namespace = "nitter";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "nitter";
accounts.secret.secretName = "nitter";
};
containers = {
nitter = {
image = nitterImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/src/nitter.conf";
subPath = "nitter.conf";
}
{
name = "accounts";
mountPath = "/src/guest_accounts.json";
subPath = "guest_accounts.json";
}
];
ports.http.containerPort = 8080;
};
nitter-ro = {
image = nitterImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/src/nitter.conf";
subPath = "nitter-ro.conf";
}
{
name = "accounts";
mountPath = "/src/guest_accounts.json";
subPath = "guest_accounts.json";
}
];
ports.http.containerPort = 8081;
};
};
};
};
};
};
kubernetes.helm.releases.nitter-redis = {
namespace = "nitter";
chart = kubenix.lib.helm.fetch {
repo = "https://charts.bitnami.com/bitnami";
chart = "redis";
version = "18.6.1";
sha256 = "CyvGHc1v1BtbzDx6hbbPah2uWpUhlNIUQowephT6hmM=";
};
values = {
auth.enabled = false;
architecture = "standalone";
image = {
registry = "registry.redict.io";
repository = "redict";
tag = "7.3-compat";
};
};
};
kubernetes.resources.ingresses.nitter = {
metadata = {
name = appName;
namespace = "nitter";
annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["nitter.gmem.ca"];
}
];
rules = [
{
host = "nitter.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
}
];
}
];
};
};
}

View file

@@ -1,87 +0,0 @@
let
appName = "jellyseerr";
appImage = "git.gmem.ca/arch/jellyseerr:postgres";
in {
kubernetes.resources.services.jellyseerr = {
metadata.namespace = "jellyseerr";
spec = {
selector.app = appName;
ports.http = {
port = 5055;
targetPort = 5055;
};
};
};
kubernetes.resources.deployments.jellyseerr = {
metadata.namespace = "jellyseerr";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "jellyseerr";
};
containers = {
jellyseerr = {
image = appImage;
envFrom = [
{secretRef.name = "jellyseerr";}
{configMapRef.name = "jellyseerr";}
];
volumeMounts = [
{
name = "config";
mountPath = "/app/config/settings.json";
subPath = "settings.json";
}
];
ports.http.containerPort = 5055;
resources = {
requests = {
cpu = "500m";
memory = "128Mi";
};
limits = {
cpu = "1";
memory = "512Mi";
};
};
};
};
};
};
};
};
kubernetes.resources.ingresses.jellyseerr = {
metadata = {
name = appName;
namespace = "jellyseerr";
annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["request-media.gmem.ca"];
}
];
rules = [
{
host = "request-media.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
}
];
}
];
};
};
}

View file

@@ -1,16 +0,0 @@
persistentVolumeClaim:
enabled: true
storageClass: nfs-client
ingress:
enabled: true
annotations:
cert-manager.io/issuer: "le-issuer"
hosts:
- pihole.gmem.ca
tls:
- secretName: pihole-tls
hosts:
- pihole.gmem.ca
serviceDns:
loadBalancerIP: 192.168.50.146
type: LoadBalancer

View file

@@ -1,101 +0,0 @@
{
lib,
config,
kubenix,
...
}: {
kubernetes.helm.releases.piped = {
namespace = "piped";
chart = kubenix.lib.helm.fetch {
repo = "https://helm.piped.video";
chart = "piped";
version = "5.0.0";
sha256 = "wfw0e37q52VW+bUMBmXILwUM0F1O1cH7Jk+6tmLAcS8=";
};
values = {
postgresql.enabled = false;
backend.config = {
FRONTEND_URL = "https://piped.gmem.ca";
API_URL = "https://pipedapi.gmem.ca";
PROXY_PART = "https://ytproxy.gmem.ca";
database.connection_url = "jdbc:postgresql://hippo-primary.default.svc:5432/piped";
database.secret = {
name = "hippo-pguser-piped";
username = "user";
password = "password";
};
};
frontend.env.BACKEND_HOSTNAME = "pipedapi.gmem.ca";
ingress = {
main = {
tls = [
{
hosts = ["piped.gmem.ca"];
}
];
hosts = [
{
host = "piped.gmem.ca";
paths = [{path = "/";}];
}
];
};
backend = {
tls = [
{
hosts = ["pipedapi.gmem.ca"];
}
];
hosts = [
{
host = "pipedapi.gmem.ca";
paths = [{path = "/";}];
}
];
};
ytproxy = {
tls = [
{
hosts = ["ytproxy.gmem.ca"];
}
];
hosts = [
{
host = "ytproxy.gmem.ca";
paths = [{path = "/";}];
}
];
};
};
};
};
kubernetes.resources.cronJobs.piped-refresh = {
metadata.namespace = "piped";
spec = {
schedule = "*/30 * * * *";
jobTemplate.spec.template.spec = {
restartPolicy = "Never";
containers.refresh-subscriptions = {
image = "debian:bookworm-slim";
envFrom = [{secretRef.name = "postgres-piped";}];
command = [
"/bin/bash"
"-c"
''
apt update && apt install -y postgresql-client curl
curl -o /dev/null "https://healthchecks.gmem.ca/ping/$HEALTHCHECKS_UUID/start"
export PGPASSWORD=$password &&
export subs=$(psql -U piped -h 192.168.50.236 -qtAX -c 'select id from public.pubsub;') &&
while IFS= read -r line; do
echo "refreshing $line"
curl -k -o /dev/null "http://piped-backend:8080/channel/$line"
done < <(printf '%s' "$subs")
curl -o /dev/null "https://healthchecks.gmem.ca/ping/$HEALTHCHECKS_UUID"
''
];
};
};
};
};
}
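The refresh CronJob translates to YAML directly; a sketch of the equivalent manifest (script body elided, unchanged from the Nix version above):

apiVersion: batch/v1
kind: CronJob
metadata:
  name: piped-refresh
  namespace: piped
spec:
  schedule: "*/30 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: Never
          containers:
            - name: refresh-subscriptions
              image: debian:bookworm-slim
              envFrom:
                - secretRef:
                    name: postgres-piped
              command: ["/bin/bash", "-c", "..."] # same script as above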

View file

@@ -1,108 +0,0 @@
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
name: hippo
spec:
image: git.gmem.ca/arch/custom-postgres:15
imagePullPolicy: Always
postgresVersion: 15
databaseInitSQL:
key: init.sql
name: init-sql
instances:
- name: instance1
replicas: 1
dataVolumeClaimSpec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: 1Gi
patroni:
dynamicConfiguration:
postgresql:
parameters:
shared_preload_libraries: vectors
backups:
pgbackrest:
manual:
repoName: repo1
options:
- --type=full
image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.47-1
global:
repo1-retention-full: "14"
repo1-retention-full-type: time
repos:
- name: repo1
volume:
volumeClaimSpec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: 1Gi
schedules:
full: "0 1 * * 0"
differential: "0 1 * * 1-6"
monitoring:
pgmonitor:
exporter:
image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi8-5.4.3-0
users:
- name: authentik
databases:
- authentik
- name: immich
databases:
- immich
- name: pterodactyl
databases:
- pterodactyl
- name: piped
databases:
- piped
- name: soju
databases:
- soju
- name: atuin
databases:
- atuin
---
apiVersion: v1
kind: ConfigMap
metadata:
name: init-sql
data:
init.sql: |
\c authentik
GRANT CREATE ON SCHEMA public TO "authentik";
\c immich
GRANT CREATE ON SCHEMA public TO "immich";
CREATE EXTENSION vectors;
\c pterodactyl
GRANT CREATE ON SCHEMA public TO "pterodactyl";
\c piped
GRANT CREATE ON SCHEMA public TO "piped";
\c soju
GRANT CREATE ON SCHEMA public TO "soju";
\c atuin
GRANT CREATE ON SCHEMA public TO "atuin";
---
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: hippo
spec:
selector:
matchLabels:
postgres-operator.crunchydata.com/cluster: hippo
postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true"
podTargetLabels:
- postgres-operator.crunchydata.com/cluster
- postgres-operator.crunchydata.com/role
- postgres-operator.crunchydata.com/instance
podMetricsEndpoints:
- port: exporter
interval: 30s

View file

@@ -1,29 +0,0 @@
FROM registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.4-1
ARG TARGETARCH
USER root
RUN microdnf install wget binutils
RUN /bin/sh -c 'set -ex && \
ARCH=`uname -m` && \
if [ "$ARCH" == "x86_64" ]; then \
echo "x86_64" && \
wget -O vectors.deb https://github.com/tensorchord/pgvecto.rs/releases/download/v0.2.0/vectors-pg15_0.2.0_amd64.deb; \
elif [ "$ARCH" == "aarch64" ]; then \
echo "arm64" && \
wget -O vectors.deb https://github.com/tensorchord/pgvecto.rs/releases/download/v0.2.0/vectors-pg15_0.2.0_arm64.deb; \
else \
echo "unknown arch" && \
exit 1; \
fi'
RUN ar x vectors.deb && \
tar xvf data.tar.gz && \
mv ./usr/lib/postgresql/15/lib/* /usr/pgsql-15/lib/ && \
mv ./usr/share/postgresql/15/extension/* /usr/pgsql-15/share/extension && \
microdnf clean all && \
rm vectors.deb control.tar.gz data.tar.gz
USER 26

View file

@@ -1,134 +0,0 @@
--- # Daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: promtail-daemonset
spec:
selector:
matchLabels:
name: promtail
template:
metadata:
labels:
name: promtail
spec:
serviceAccount: promtail-serviceaccount
containers:
- name: promtail-container
image: grafana/promtail
args:
- -config.file=/etc/promtail/promtail.yaml
env:
- name: 'HOSTNAME' # needed when using kubernetes_sd_configs
valueFrom:
fieldRef:
fieldPath: 'spec.nodeName'
volumeMounts:
- name: logs
mountPath: /var/log
- name: promtail-config
mountPath: /etc/promtail
- mountPath: /var/lib/docker/containers
name: varlibdockercontainers
readOnly: true
volumes:
- name: logs
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: promtail-config
configMap:
name: promtail-config
--- # configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: promtail-config
data:
promtail.yaml: |
server:
http_listen_port: 9080
grpc_listen_port: 0
clients:
- url: http://100.126.232.130:3030/loki/api/v1/push
positions:
filename: /tmp/positions.yaml
target_config:
sync_period: 10s
scrape_configs:
- job_name: pod-logs
kubernetes_sd_configs:
- role: pod
pipeline_stages:
- docker: {}
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_node_name
target_label: __host__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- action: replace
replacement: $1
separator: /
source_labels:
- __meta_kubernetes_namespace
- __meta_kubernetes_pod_name
target_label: job
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: namespace
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: pod
- action: replace
source_labels:
- __meta_kubernetes_pod_container_name
target_label: container
- replacement: /var/log/pods/*$1/*.log
separator: /
source_labels:
- __meta_kubernetes_pod_uid
- __meta_kubernetes_pod_container_name
target_label: __path__
--- # Clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: promtail-clusterrole
rules:
- apiGroups: [""]
resources:
- nodes
- services
- pods
verbs:
- get
- watch
- list
--- # ServiceAccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: promtail-serviceaccount
--- # Rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: promtail-clusterrolebinding
subjects:
- kind: ServiceAccount
name: promtail-serviceaccount
namespace: promtail
roleRef:
kind: ClusterRole
name: promtail-clusterrole
apiGroup: rbac.authorization.k8s.io

View file

@@ -1,74 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: protonmail-bridge
spec:
selector:
matchLabels:
app: protonmail-bridge
template:
metadata:
labels:
app: protonmail-bridge
spec:
containers:
- name: protonmail-bridge
image: shenxn/protonmail-bridge:2.3.0-build
resources:
limits:
memory: "512Mi"
cpu: "1"
requests:
memory: "128Mi"
cpu: "500m"
ports:
- containerPort: 143
name: imap
- containerPort: 25
name: smtp
volumeMounts:
- name: data
mountPath: /root
readinessProbe:
tcpSocket:
port: 143
initialDelaySeconds: 3
periodSeconds: 10
livenessProbe:
tcpSocket:
port: 143
initialDelaySeconds: 15
periodSeconds: 20
volumes:
- name: data
persistentVolumeClaim:
claimName: protonmail-bridge
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: protonmail-bridge
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storageClassName: nfs-client
---
apiVersion: v1
kind: Service
metadata:
name: protonmail-bridge
spec:
selector:
app: protonmail-bridge
ports:
- port: 1143
targetPort: 143
name: imap
- port: 1025
targetPort: 25
name: smtp
externalIPs:
- 100.120.232.77

View file

@@ -1,96 +0,0 @@
let
appName = "pterodactyl-panel";
pterodactyl-panel-Image = "git.gmem.ca/arch/pterodactyl-panel:latest";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.services.pterodactyl-panel = {
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
};
};
};
kubernetes.resources.statefulSets.pterodactyl-panel.spec = {
selector.matchLabels.app = appName;
serviceName = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
pterodactyl-panel = {
image = pterodactyl-panel-Image;
imagePullPolicy = "Always";
ports.http.containerPort = 8080;
volumeMounts = [
{
name = "data";
mountPath = "/var/www/pterodactyl/storage/app";
}
];
envFrom = [{secretRef.name = "pterodactyl";}];
};
};
};
};
volumeClaimTemplates = [
{
metadata.name = "data";
spec = {
storageClassName = "nfs-client";
accessModes = ["ReadWriteOnce"];
resources.requests.storage = "1Gi";
};
}
];
};
kubernetes.helm.releases.pterodactyl-redis = {
chart = kubenix.lib.helm.fetch {
repo = "https://charts.bitnami.com/bitnami";
chart = "redis";
version = "18.6.1";
sha256 = "CyvGHc1v1BtbzDx6hbbPah2uWpUhlNIUQowephT6hmM=";
};
values = {
auth.enabled = false;
architecture = "standalone";
};
};
kubernetes.resources.ingresses.pterodactyl-panel = {
metadata = {
name = appName;
annotations = {
"cert-manager.io/issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["games.gmem.ca"];
secretName = "gmem-ca-wildcard";
}
];
rules = [
{
host = "games.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
}
];
}
];
};
};
}

View file

@@ -1,85 +0,0 @@
let
appName = "redlib";
appImage = "git.gmem.ca/arch/redlib:latest";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.services.redlib = {
metadata.namespace = "redlib";
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
};
};
};
kubernetes.resources.deployments.redlib = {
metadata.namespace = "redlib";
spec = {
selector.matchLabels.app = appName;
replicas = 2;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "redlib";
};
containers = {
redlib = {
image = appImage;
imagePullPolicy = "Always";
ports.http.containerPort = 8080;
resources = {
requests = {
cpu = "100m";
memory = "64Mi";
};
limits = {
memory = "128Mi";
};
};
};
};
};
};
};
};
kubernetes.resources.ingresses.redlib = {
metadata = {
name = appName;
namespace = "redlib";
annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["red.gmem.ca"];
}
];
rules = [
{
host = "red.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
}
];
}
];
};
};
}

View file

@@ -1,73 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: registry
labels:
app: registry
spec:
containers:
- name: registry
image: registry:2.8.2
volumeMounts:
- name: registry-repo
mountPath: "/var/lib/registry"
resources:
limits:
memory: "256Mi"
cpu: "1"
requests:
memory: "64Mi"
cpu: "100m"
volumes:
- name: registry-repo
persistentVolumeClaim:
claimName: registry-repo
---
apiVersion: v1
kind: Service
metadata:
name: registry
spec:
selector:
app: registry
ports:
- port: 5000
targetPort: 5000
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: registry-repo
namespace: default
spec:
resources:
requests:
storage: 2Gi
accessModes:
- ReadWriteOnce
storageClassName: nfs-client
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: container-registry
annotations:
cert-manager.io/issuer: "le-issuer"
nginx.ingress.kubernetes.io/proxy-body-size: 100m
namespace: default
spec:
tls:
- hosts:
- icr.gmem.ca
secretName: gmem-ca-wildcard
rules:
- host: icr.gmem.ca
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: registry
port:
number: 5000

View file

@@ -1,146 +0,0 @@
let
appName = "searxng";
appImage = "docker.io/searxng/searxng:latest";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.services.searxng = {
metadata.namespace = "searxng";
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
};
};
};
kubernetes.resources.deployments.searxng = {
metadata.namespace = "searxng";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = config.kubernetes.resources.configMaps.searxng.metadata.name;
};
containers = {
searxng = {
image = appImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/etc/searxng/settings.yml";
subPath = "settings.yml";
}
{
name = "config";
mountPath = "/etc/searxng/limiter.toml";
subPath = "limiter.toml";
}
];
envFrom = [{secretRef.name = "searxng";}];
ports.http.containerPort = 8080;
resources = {
requests = {
cpu = "100m";
memory = "512Mi";
};
limits = {
memory = "1Gi";
};
};
};
};
};
};
};
};
kubernetes.resources.configMaps.searxng = {
metadata.namespace = "searxng";
data."settings.yml" = ''
use_default_settings: true
server:
image_proxy: true
http_protocol_version: "1.1"
method: "GET"
ui:
static_use_hash: true
redis:
url: redis://searxng-redis-master:6379/0
general:
instance_name: search.gmem.ca
hostname_replace:
'(.*\.)?youtube\.com$': 'piped.gmem.ca'
'(.*\.)?youtu\.be$': 'piped.gmem.ca'
'(.*\.)?youtube-nocookie\.com$': 'piped.gmem.ca'
'(www\.)?twitter\.com$': 'nitter.gmem.ca'
'(www\.)?x\.com$': 'nitter.gmem.ca'
'(.*\.)?reddit\.com$': 'red.gmem.ca'
'';
data."limiter.toml" = ''
# This configuration file updates the default configuration file
# See https://github.com/searxng/searxng/blob/master/searx/botdetection/limiter.toml
[botdetection.ip_limit]
# activate link_token method in the ip_limit method
link_token = true
'';
};
kubernetes.helm.releases.searxng-redis = {
namespace = "searxng";
chart = kubenix.lib.helm.fetch {
repo = "https://charts.bitnami.com/bitnami";
chart = "redis";
version = "18.6.1";
sha256 = "CyvGHc1v1BtbzDx6hbbPah2uWpUhlNIUQowephT6hmM=";
};
values = {
auth.enabled = false;
architecture = "standalone";
image = {
registry = "registry.redict.io";
repository = "redict";
tag = "7.3-compat";
};
};
};
kubernetes.resources.ingresses.searxng = {
metadata = {
name = appName;
namespace = "searxng";
annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["search.gmem.ca"];
}
];
rules = [
{
host = "search.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
}
];
}
];
};
};
}

View file

@@ -1,150 +0,0 @@
let
appName = "snikket";
snikketImage = "git.gmem.ca/arch/snikket-server:latest";
snikketPortalImage = "snikket/snikket-web-portal:stable";
in
{
lib,
config,
kubenix,
...
}: {
kubernetes.resources.services.snikket = {
metadata.namespace = "snikket";
spec = {
selector.app = appName;
ports.http = {
port = 5280;
targetPort = 5280;
};
};
};
kubernetes.resources.services.snikket-xmpp = {
metadata.namespace = "snikket";
spec = {
type = "NodePort";
selector.app = appName;
ports.http = {
port = 5222;
targetPort = 5222;
nodePort = 5222;
};
};
};
kubernetes.resources.services.snikket-web-portal = {
metadata.namespace = "snikket";
spec = {
selector.app = appName + "-web-portal";
ports.http = {
port = 5765;
targetPort = 5765;
};
};
};
kubernetes.resources.deployments.snikket = {
metadata.namespace = "snikket";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
snikket = {
image = snikketImage;
env.SNIKKET_TWEAK_TURNSERVER.value = "0";
env.SNIKKET_TWEAK_INTERNAL_HTTP_INTERFACE.value = "0.0.0.0";
envFrom = [{configMapRef.name = "snikket";}];
imagePullPolicy = "Always";
volumeMounts = [
{
name = "certs";
mountPath = "/etc/prosody/certs/chat.gmem.ca.crt";
subPath = "tls.crt";
}
{
name = "certs";
mountPath = "/etc/prosody/certs/chat.gmem.ca.key";
subPath = "tls.key";
}
];
ports.http.containerPort = 5280;
};
};
volumes = {
certs.secret.secretName = "chat-gmem-ca";
};
};
};
};
};
kubernetes.resources.deployments.snikket-web-portal = {
metadata.namespace = "snikket";
spec = {
selector.matchLabels.app = appName + "-web-portal";
template = {
metadata.labels.app = appName + "-web-portal";
spec = {
containers = {
snikket = {
image = snikketPortalImage;
env.SNIKKET_TWEAK_PORTAL_INTERNAL_HTTP_INTERFACE.value = "0.0.0.0";
env.SNIKKET_WEB_PROSODY_ENDPOINT.value = "http://snikket:5280";
imagePullPolicy = "Always";
ports.http.containerPort = 5765;
};
};
};
};
};
};
kubernetes.resources.ingresses.snikket = {
metadata = {
name = appName;
namespace = "snikket";
annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["chat.gmem.ca"];
}
];
rules = [
{
host = "chat.gmem.ca";
http.paths =
[
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName + "-web-portal";
port.name = "http";
};
}
]
++ lib.lists.forEach [
# Routes we want to hit Prosody's backend
"/admin_api"
"/invites_api"
"/invites_bootstrap"
"/upload"
"/http-bind"
"/xmpp-websocket"
"/.well-known/host-meta"
"/.well-known/host-meta.json"
] (path: {
path = path;
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
});
}
];
};
};
}

View file

@@ -1,11 +0,0 @@
apiVersion: v1
data:
grocy.yml: |-
dbs:
- path: /config/data/grocy.db
replicas:
- url: s3://gmem-archival/litestream/grocy
kind: ConfigMap
metadata:
name: grocy-litestream
namespace: default

View file

@@ -1,100 +0,0 @@
let
appName = "tclip";
tclipImage = "git.gmem.ca/arch/tclip:arm";
in {
kubernetes.resources.statefulSets.tclip = {
metadata.namespace = "tclip";
spec = {
serviceName = appName;
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
containers = {
tclip = {
image = tclipImage;
imagePullPolicy = "Always";
env = [
{
name = "DATA_DIR";
value = "/state";
}
{
name = "USE_FUNNEL";
value = "true";
}
{
name = "HTTP_PORT";
value = "8080";
}
{
name = "ENABLE_METRICS";
value = "true";
}
];
ports.http.containerPort = 8080;
envFrom = [{secretRef.name = "tclip";}];
volumeMounts = [
{
name = "state";
mountPath = "/state";
}
];
};
};
};
};
volumeClaimTemplates = [
{
metadata.name = "state";
spec = {
storageClassName = "nfs-client";
accessModes = ["ReadWriteOnce"];
resources.requests.storage = "512Mi";
};
}
];
};
};
kubernetes.resources.services.tclip = {
metadata.namespace = "tclip";
spec = {
selector.app = appName;
ports.http = {
port = 8080;
targetPort = 8080;
};
};
};
kubernetes.resources.ingresses.tclip = {
metadata = {
name = appName;
namespace = "tclip";
annotations = {
"cert-manager.io/cluster-issuer" = "le-issuer";
};
};
spec = {
tls = [
{
hosts = ["paste.gmem.ca"];
}
];
rules = [
{
host = "paste.gmem.ca";
http.paths = [
{
path = "/";
pathType = "Prefix";
backend.service = {
name = appName;
port.name = "http";
};
}
];
}
];
};
};
}

View file

@@ -1,80 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: vaultwarden
spec:
replicas: 1
selector:
matchLabels:
app: vaultwarden
template:
metadata:
labels:
app: vaultwarden
spec:
volumes:
- name: config
configMap:
name: vaultwarden
- name: data
emptyDir: {}
containers:
- name: vaultwarden
image: docker.io/vaultwarden/server:testing
imagePullPolicy: Always
resources:
limits:
memory: "128Mi"
cpu: "500m"
requests:
memory: "64Mi"
cpu: "100m"
envFrom:
- secretRef:
name: vaultwarden
ports:
- containerPort: 80
name: web
volumeMounts:
- name: data
mountPath: /data
- name: config
mountPath: /data/config.json
subPath: vaultwarden.json
---
apiVersion: v1
kind: Service
metadata:
name: vaultwarden
labels:
app: vaultwarden
spec:
selector:
app: vaultwarden
ports:
- port: 80
targetPort: 80
name: web
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: vaultwarden
annotations:
    cert-manager.io/cluster-issuer: "le-issuer"
spec:
tls:
- hosts:
- pw.gmem.ca
secretName: gmem-ca-wildcard
rules:
- host: pw.gmem.ca
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: vaultwarden
port:
number: 80

View file

@@ -1,67 +0,0 @@
let
appName = "vrchat-prometheus-exporter";
appImage = "git.gmem.ca/arch/vrchat-prometheus-adapter:arm";
in {
kubernetes.resources."monitoring.coreos.com"."v1".ServiceMonitor.vrchat-prometheus-adapter = {
metadata.namespace = "vrchat";
spec = {
selector.matchLabels.app = appName;
endpoints = [
{
port = "metrics";
interval = "60s";
}
];
};
};
kubernetes.resources.services.vrchat-prometheus-adapter = {
metadata.namespace = "vrchat";
metadata.labels.app = appName;
spec = {
selector.app = appName;
ports.metrics = {
port = 6534;
targetPort = 6534;
};
};
};
kubernetes.resources.deployments.vrchat-prometheus-adapter = {
metadata.namespace = "vrchat";
spec = {
selector.matchLabels.app = appName;
template = {
metadata.labels.app = appName;
spec = {
volumes = {
config.configMap.name = "vrchat-prometheus-adapter";
};
containers = {
vrchat-prometheus-adapter = {
image = appImage;
imagePullPolicy = "Always";
volumeMounts = [
{
name = "config";
mountPath = "/config.toml";
subPath = "config.toml";
}
];
envFrom = [{secretRef.name = "vrchat-prometheus-adapter";}];
ports.metrics.containerPort = 6534;
resources = {
requests = {
cpu = "50m";
memory = "32Mi";
};
limits = {
cpu = "500m";
memory = "256Mi";
};
};
};
};
};
};
};
};
}

View file

@@ -1,29 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: webdev-support-bot
spec:
selector:
matchLabels:
app: webdev-support-bot
template:
metadata:
labels:
app: webdev-support-bot
spec:
containers:
- name: webdev-support-bot
image: icr.gmem.ca/webdev-support-bot
resources:
limits:
memory: "512Mi"
cpu: "500m"
requests:
memory: "128Mi"
cpu: "1m"
envFrom:
- secretRef:
name: webdev-support-bot
- configMapRef:
name: webdev-support-bot

View file

@@ -3,7 +3,7 @@ authentik:
enabled: false
global:
image:
tag: 2024.4.2
tag: 2024.6.0
env:
- name: AUTHENTIK_WEB__THREADS
value: "2"

View file

@@ -0,0 +1,12 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: authentik
helmCharts:
- name: authentik
repo: https://charts.goauthentik.io
releaseName: authentik
namespace: authentik
version: 2024.6.0
valuesFile: ./authentik.yml
kubeVersion: "1.30"

View file

@@ -17,7 +17,7 @@ spec:
spec:
containers:
- name: cloudflared
image: cloudflare/cloudflared:2024.4.1
image: cloudflare/cloudflared:2024.6.1
args:
- tunnel
- --config

View file

@@ -0,0 +1,20 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: cloudflare
resources:
- cloudflared.yml
helmCharts:
- name: cloudflare-exporter
releaseName: cloudflare-exporter
version: 0.2.1
repo: https://lablabs.github.io/cloudflare-exporter
valuesInline:
image:
tag: "0.0.16"
secretRef: "cloudflare-exporter"
serviceMonitor:
enabled: true
labels:
release: "prometheus"
kubeVersion: "1.30"

View file

@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: duplikate
namespace: duplikate
spec:
selector:
matchLabels:
app: duplikate
template:
metadata:
labels:
app: duplikate
spec:
containers:
- env:
- name: REDIS_URL
value: redis://duplikate-redis-master
envFrom:
- secretRef:
name: duplikate
image: git.gmem.ca/arch/duplikate:latest
name: duplikate
resources:
limits:
cpu: '1'
memory: 128Mi
requests:
cpu: 10m
memory: 32Mi

View file

@@ -0,0 +1,27 @@
apiVersion: secrets.infisical.com/v1alpha1
kind: InfisicalSecret
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: duplikate
namespace: duplikate
spec:
authentication:
kubernetesAuth:
identityId: 68d1f432-7b0a-4e4a-b439-acbbbc160f1e
secretsScope:
envSlug: prod
projectSlug: kubernetes-homelab-dp67
secretsPath: /duplikate
serviceAccountRef:
name: infisical-auth
namespace: infisical
hostAPI: http://infisical:8080
managedSecretReference:
creationPolicy: Owner
secretName: duplikate
secretNamespace: duplikate
resyncInterval: 10
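
The secrets operator reconciles this resource into a plain Kubernetes Secret (managedSecretReference, with creationPolicy: Owner so the operator owns its lifecycle), and that generated Secret is what the Deployment above consumes through its envFrom secretRef. A quick verification sketch once the operator is running (commands as comments):
# kubectl -n duplikate get infisicalsecret duplikate   # sync state in .status conditions
# kubectl -n duplikate get secret duplikate            # the operator-managed output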

View file

@@ -0,0 +1,20 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: duplikate
resources:
- Deployment-duplikate.yaml
- InfisicalSecret-duplikate.yaml
helmCharts:
- name: redis
releaseName: duplikate-redis
version: 18.6.1
repo: https://charts.bitnami.com/bitnami
valuesInline:
auth:
enabled: false
architecture: standalone
image:
registry: registry.redict.io
repository: redict
tag: 7.3-compat

View file

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Endpoints
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: austin
namespace: endpoints
subsets:
- addresses:
- ip: 192.168.50.237
ports:
- name: austin
port: 8080
protocol: TCP

View file

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Endpoints
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: git
namespace: endpoints
subsets:
- addresses:
- ip: 192.168.50.229
ports:
- name: git
port: 443
protocol: TCP

View file

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Endpoints
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: ibiza
namespace: endpoints
subsets:
- addresses:
- ip: 192.168.50.182
ports:
- name: ibiza
port: 8000
protocol: TCP

View file

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Endpoints
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: proxmox
namespace: endpoints
subsets:
- addresses:
- ip: 192.168.50.3
ports:
- name: proxmox
port: 8006
protocol: TCP

View file

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Endpoints
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: tokyo
namespace: endpoints
subsets:
- addresses:
- ip: 192.168.50.124
ports:
- name: tokyo
port: 8000
protocol: TCP

View file

@@ -0,0 +1,28 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
    cert-manager.io/cluster-issuer: le-issuer
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
nginx.ingress.kubernetes.io/backend-protocol: HTTP
nginx.ingress.kubernetes.io/proxy-body-size: 10g
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: austin
namespace: endpoints
spec:
rules:
- host: austin.gmem.ca
http:
paths:
- backend:
service:
name: austin
port:
number: 8080
path: /
pathType: Prefix
tls:
- hosts:
- austin.gmem.ca

View file

@@ -0,0 +1,28 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
    cert-manager.io/cluster-issuer: le-issuer
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
nginx.ingress.kubernetes.io/proxy-body-size: 10g
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: git
namespace: endpoints
spec:
rules:
- host: git.gmem.ca
http:
paths:
- backend:
service:
name: git
port:
number: 443
path: /
pathType: Prefix
tls:
- hosts:
- git.gmem.ca

View file

@@ -0,0 +1,28 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
    cert-manager.io/cluster-issuer: le-issuer
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
nginx.ingress.kubernetes.io/backend-protocol: HTTP
nginx.ingress.kubernetes.io/proxy-body-size: 10g
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: ibiza
namespace: endpoints
spec:
rules:
- host: ibiza.gmem.ca
http:
paths:
- backend:
service:
name: ibiza
port:
number: 8000
path: /
pathType: Prefix
tls:
- hosts:
- ibiza.gmem.ca

View file

@@ -0,0 +1,28 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
    cert-manager.io/cluster-issuer: le-issuer
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
nginx.ingress.kubernetes.io/proxy-body-size: 10g
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: proxmox
namespace: endpoints
spec:
rules:
- host: proxmox.gmem.ca
http:
paths:
- backend:
service:
name: proxmox
port:
number: 8006
path: /
pathType: Prefix
tls:
- hosts:
- proxmox.gmem.ca

View file

@@ -0,0 +1,28 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
    cert-manager.io/cluster-issuer: le-issuer
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
nginx.ingress.kubernetes.io/backend-protocol: HTTP
nginx.ingress.kubernetes.io/proxy-body-size: 10g
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: tokyo
namespace: endpoints
spec:
rules:
- host: tokyo.gmem.ca
http:
paths:
- backend:
service:
name: tokyo
port:
number: 8000
path: /
pathType: Prefix
tls:
- hosts:
- tokyo.gmem.ca

View file

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: austin
namespace: endpoints
spec:
ports:
- name: austin
port: 8080
targetPort: 8080

View file

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: git
namespace: endpoints
spec:
ports:
- name: git
port: 443
targetPort: 443

View file

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: ibiza
namespace: endpoints
spec:
ports:
- name: ibiza
port: 8000
targetPort: 8000

View file

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: proxmox
namespace: endpoints
spec:
ports:
- name: proxmox
port: 8006
targetPort: 8006

View file

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: tokyo
namespace: endpoints
spec:
ports:
- name: tokyo
port: 8000
targetPort: 8000

View file

@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- Endpoints-austin.yaml
- Endpoints-git.yaml
- Endpoints-ibiza.yaml
- Endpoints-proxmox.yaml
- Endpoints-tokyo.yaml
- Service-austin.yaml
- Service-git.yaml
- Service-ibiza.yaml
- Service-proxmox.yaml
- Service-tokyo.yaml
- Ingress-austin.yaml
- Ingress-git.yaml
- Ingress-ibiza.yaml
- Ingress-proxmox.yaml
- Ingress-tokyo.yaml
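
None of the Services in this namespace carry a selector, which is deliberate: a selectorless Service routes to whatever manually maintained Endpoints object shares its name and namespace. That is how out-of-cluster hosts like Proxmox and the git server get in-cluster names plus Ingress TLS. A minimal paired sketch for a hypothetical host (192.0.2.10 is a TEST-NET placeholder):
apiVersion: v1
kind: Service
metadata:
  name: example            # hypothetical
  namespace: endpoints
spec:
  ports:                   # no selector: traffic follows the Endpoints below
  - name: example
    port: 8080
    targetPort: 8080
---
apiVersion: v1
kind: Endpoints
metadata:
  name: example            # must match the Service name exactly
  namespace: endpoints
subsets:
- addresses:
  - ip: 192.0.2.10
  ports:
  - name: example
    port: 8080
    protocol: TCP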

View file

@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: homepage
labels:
app.kubernetes.io/app: homepage
spec:
selector:
matchLabels:
app.kubernetes.io/name: homepage
template:
metadata:
labels:
app.kubernetes.io/name: homepage
spec:
containers:
- name: homepage
envFrom:
- secretRef:
name: homepage-config
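
This fragment is not applied on its own: the kustomization that follows lists it under patches:, so kustomize strategic-merges it over the Deployment rendered from the homepage chart, matching the container by name and splicing in the envFrom. A sketch of the merged container, assuming the chart emits the image pinned in the values file below:
containers:
- name: homepage
  image: ghcr.io/gethomepage/homepage:latest
  envFrom:
  - secretRef:
      name: homepage-config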

View file

@@ -0,0 +1,209 @@
replicaCount: 2
image:
repository: ghcr.io/gethomepage/homepage
tag: latest
serviceAccount:
create: true
enableRbac: true
config:
bookmarks: []
services:
- Media:
- Plex:
icon: plex.png
href: https://app.plex.tv
description: Plex
widget:
type: plex
url: http://192.168.50.229:32400
key: "{{HOMEPAGE_VAR_PLEX_KEY}}"
- Jellyseerr:
icon: jellyseerr.png
href: https://request-media.gmem.ca
description: Request movies and TV shows
widget:
type: jellyseerr
url: https://request-media.gmem.ca
key: "{{HOMEPAGE_VAR_JELLYSEERR_KEY}}"
- Transmission:
icon: transmission.png
description: Download progress for torrents
widget:
type: transmission
url: http://192.168.50.187:9091
- "Personal Infrastructure":
- authentik:
icon: authentik.png
href: https://authentik.gmem.ca
description: OIDC SSO
- Tailscale:
icon: tailscale.png
href: https://login.tailscale.com
description: VPN provider
- Git:
icon: forgejo.png
href: https://git.gmem.ca
description: Git forge
- Grafana:
icon: grafana.png
href: https://grafana.gmem.ca
description: Monitoring & metrics
widget:
type: grafana
url: https://grafana.gmem.ca
username: api@localhost
password: "{{HOMEPAGE_VAR_GRAFANA_PASSWORD}}"
- NextDNS:
icon: nextdns.png
href: https://my.nextdns.io/bcee89/setup
description: DNS provider
widget:
type: nextdns
profile: bcee89
key: "{{HOMEPAGE_VAR_NEXTDNS_KEY}}"
- Proxmox:
icon: proxmox.png
href: https://proxmox.gmem.ca
description: Homelab proxmox
widget:
type: proxmox
url: https://proxmox.gmem.ca
username: api@pam!homepage
password: "{{HOMEPAGE_VAR_PROXMOX_PASSWORD}}"
- Immich:
icon: immich.png
href: https://photos.gmem.ca
description: Image hosting
widget:
type: immich
url: https://photos.gmem.ca
key: "{{HOMEPAGE_VAR_IMMICH_KEY}}"
- "NextDNS Tailscale":
icon: nextdns.png
href: https://my.nextdns.io/74c6db/setup
description: Tailnet DNS provider
widget:
type: nextdns
profile: 74c6db
key: "{{HOMEPAGE_VAR_NEXTDNS_KEY}}"
- "Paperless-ngx":
icon: paperless-ngx.png
href: https://docs.gmem.ca
description: Document storage and indexing
- Reading:
- miniflux:
icon: miniflux.png
href: https://rss.gmem.ca
description: Miniflux RSS Reader
- "Lobste.rs":
href: https://lobste.rs
description: News aggregator
- "Hacker News":
href: https://news.ycombinator.com
description: VC news aggregator
- "Floofy.tech Infrastructure":
- Mastodon:
icon: mastodon.png
href: https://floofy.tech
description: Primary Mastodon instance
widget:
type: mastodon
url: https://floofy.tech
- Grafana:
icon: grafana.png
href: https://grafana.services.floofy.tech
description: Metrics and Monitoring
- vSphere:
icon: vmware-esxi.png
href: https://vcenter.services.floofy.tech
description: Hypervisor Manager
- "vrclub.social":
icon: calckey.png
href: https://vrclub.social
description: Firefish instance for VR clubs
- "Tools":
- "IT Tools":
icon: it-tools.png
href: https://tools.gmem.ca
description: Various useful tools
- Cyberchef:
icon: cyberchef.png
href: https://gchq.github.io/CyberChef/
description: More useful tools, mostly text manipulation
- "Backup Status":
- "gsimmer backups":
icon: healthchecks.png
href: https://healthchecks.gmem.ca
description: Uptime monitor for recurring tasks
widget:
type: healthchecks
url: https://healthchecks.gmem.ca
key: "{{HOMEPAGE_VAR_HEALTHCHECKS_KEY}}"
uuid: 617d460f-69f6-444f-852a-421861543327
- "becki backups":
icon: healthchecks.png
href: https://healthchecks.gmem.ca
description: Uptime monitor for recurring tasks
widget:
type: healthchecks
url: https://healthchecks.gmem.ca
key: "{{HOMEPAGE_VAR_HEALTHCHECKS_KEY}}"
uuid: 9d01d3dd-2a56-4c70-9b5c-9cb99a1466db
- "apps backups":
icon: healthchecks.png
href: https://healthchecks.gmem.ca
description: Uptime monitor for recurring tasks
widget:
type: healthchecks
url: https://healthchecks.gmem.ca
key: "{{HOMEPAGE_VAR_HEALTHCHECKS_KEY}}"
uuid: 37a854b0-9191-4452-aa30-df3969d59b09
settings:
title: "Arch's Homepage"
providers:
openweathermap: "{{HOMEPAGE_VAR_WEATHER_KEY}}"
background:
image: "https://images.unsplash.com/photo-1502790671504-542ad42d5189?auto=format&fit=crop&w=2560&q=80"
blur: sm
opacity: 50
base: "https://home.gmem.ca"
layout:
Media:
style: row
columns: "3"
"Personal Infrastructure":
style: row
columns: "3"
"Backup Status":
style: row
columns: "3"
kubernetes:
mode: cluster
widgets:
- logo:
icon: https://gmem.ca/avatar.png
- kubernetes:
cluster:
show: true
nodes:
show: true
- search:
provider: duckduckgo
- openweathermap:
provider: openweathermap
cache: 5
units: metric
ingress:
main:
enabled: true
ingressClassName: nginx
hosts:
- host: home.gmem.ca
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- home.gmem.ca
secretName: gmem-ca-wildcard
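
Every {{HOMEPAGE_VAR_*}} placeholder above is resolved by homepage at runtime from an environment variable of the same name, which is exactly what the homepage-config secret patched into the Deployment supplies. A hypothetical sketch of that secret's shape, with keys inferred from the placeholders used here:
apiVersion: v1
kind: Secret
metadata:
  name: homepage-config
  namespace: homepage
stringData:
  HOMEPAGE_VAR_PLEX_KEY: "<redacted>"
  HOMEPAGE_VAR_JELLYSEERR_KEY: "<redacted>"
  HOMEPAGE_VAR_GRAFANA_PASSWORD: "<redacted>"
  HOMEPAGE_VAR_NEXTDNS_KEY: "<redacted>"
  HOMEPAGE_VAR_PROXMOX_PASSWORD: "<redacted>"
  HOMEPAGE_VAR_IMMICH_KEY: "<redacted>"
  HOMEPAGE_VAR_HEALTHCHECKS_KEY: "<redacted>"
  HOMEPAGE_VAR_WEATHER_KEY: "<redacted>"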

View file

@@ -0,0 +1,16 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: homepage
patches:
- path: ./deployment.yaml
helmCharts:
- name: homepage
repo: https://jameswynn.github.io/helm-charts
releaseName: homepage
namespace: homepage
version: 1.2.3
kubeVersion: "1.30"
valuesFile: ./homepage.yaml

View file

@@ -0,0 +1,19 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: infisical
helmCharts:
- name: infisical-standalone
repo: https://dl.cloudsmith.io/public/infisical/helm-charts/helm/charts
releaseName: infisical
namespace: infisical
version: 1.0.8
valuesFile: ./infvalues.yml
kubeVersion: "1.30"
- name: secrets-operator
repo: https://dl.cloudsmith.io/public/infisical/helm-charts/helm/charts
releaseName: secrets-operator-1718466666
namespace: infisical
version: 0.6.2
kubeVersion: "1.30"

View file

@@ -0,0 +1,12 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: ingress-nginx
helmCharts:
- name: ingress-nginx
repo: https://kubernetes.github.io/ingress-nginx
releaseName: ingress-nginx
namespace: ingress-nginx
version: 4.10.1
valuesFile: ./nginx.yaml
kubeVersion: "1.30"

View file

@@ -0,0 +1,24 @@
controller:
kind: DaemonSet
metrics:
enabled: true
serviceMonitor:
enabled: true
additionalLabels:
release: prometheus
podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "10254"
tolerations:
- key: "node-role.kubernetes.io/control-plane"
effect: NoSchedule
ingressClassResource:
default: true
publishService:
enabled: true
service:
type: NodePort
externalTrafficPolicy: Local
hostNetwork: true
extraArgs:
default-ssl-certificate: cert-manager/gmem-ca-wildcard
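
The default-ssl-certificate argument points nginx at the cert-manager/gmem-ca-wildcard secret, which is why most Ingresses in this tree can list tls: hosts without a secretName: any host without an explicit TLS secret falls back to the wildcard. A sketch of the Certificate that would keep that secret populated, assuming le-issuer is a ClusterIssuer capable of DNS-01 (required for wildcards):
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: gmem-ca-wildcard
  namespace: cert-manager
spec:
  secretName: gmem-ca-wildcard
  dnsNames:
  - gmem.ca
  - '*.gmem.ca'
  issuerRef:
    name: le-issuer
    kind: ClusterIssuer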

View file

@@ -0,0 +1,30 @@
apiVersion: v1
data:
config: 'listen ircs://
listen unix+admin:///app/admin
listen ws+insecure://
listen http+prometheus://localhost:9090
hostname irc.gmem.ca
title irc.gmem.ca
db postgres "dbname=soju"
message-store db
tls /ssl/tls.crt /ssl/tls.key
'
kind: ConfigMap
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: soju-4a44ac46db
namespace: irc

View file

@@ -0,0 +1,26 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: gamja
namespace: irc
spec:
selector:
matchLabels:
app: gamja
template:
metadata:
labels:
app: gamja
spec:
containers:
- image: git.gmem.ca/arch/gamja:latest
imagePullPolicy: Always
name: gamja
ports:
- containerPort: 80
name: http

View file

@@ -0,0 +1,59 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: soju
namespace: irc
spec:
selector:
matchLabels:
app: soju
template:
metadata:
labels:
app: soju
spec:
containers:
- env:
- name: PGDATABASE
valueFrom:
secretKeyRef:
key: dbname
name: postgres-soju
- name: PGHOST
value: 192.168.50.236
- name: PGPASSWORD
valueFrom:
secretKeyRef:
key: password
name: postgres-soju
- name: PGUSER
valueFrom:
secretKeyRef:
key: user
name: postgres-soju
image: git.gmem.ca/arch/soju:s3
imagePullPolicy: Always
name: soju
ports:
- containerPort: 6697
name: tls
- containerPort: 80
name: ws
volumeMounts:
- mountPath: /etc/soju/config
name: config
subPath: config
- mountPath: /ssl
name: ssl
volumes:
- configMap:
name: soju-4a44ac46db
name: config
- name: ssl
secret:
secretName: irc-gmem-ca

View file

@@ -0,0 +1,42 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: le-issuer
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
nginx.ingress.kubernetes.io/proxy-read-timeout: '3600'
nginx.ingress.kubernetes.io/proxy-send-timeout: '3600'
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: irc
namespace: irc
spec:
rules:
- host: irc.gmem.ca
http:
paths:
- backend:
service:
name: gamja
port:
number: 80
path: /
pathType: Prefix
- backend:
service:
name: soju-ws
port:
number: 80
path: /socket
pathType: Prefix
- backend:
service:
name: soju-ws
port:
number: 80
path: /uploads
pathType: Prefix
tls:
- hosts:
- irc.gmem.ca

View file

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: gamja
namespace: irc
spec:
ports:
- name: http
port: 80
targetPort: 80
selector:
app: gamja

View file

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: soju-ws
namespace: irc
spec:
ports:
- name: ws
port: 80
targetPort: 80
selector:
app: soju

View file

@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: soju
namespace: irc
spec:
ports:
- name: tls
nodePort: 6697
port: 6697
targetPort: 6697
selector:
app: soju
type: NodePort
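
nodePort: 6697 sits well outside the default NodePort window of 30000-32767, so this Service is only admitted if the API server runs with an extended range. A sketch of the required flag (the exact range is an assumption; it just has to include 6697):
# kube-apiserver flag, e.g. in a kubeadm static-pod manifest:
#   --service-node-port-range=1024-32767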

View file

@@ -0,0 +1,22 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: irc-gmem-ca
namespace: irc
spec:
# Secret names are always required.
secretName: irc-gmem-ca
duration: 2160h # 90d
renewBefore: 360h # 15d
dnsNames:
- irc.gmem.ca
issuerRef:
name: le-issuer
# We can reference ClusterIssuers by changing the kind here.
# The default value is Issuer (i.e. a locally namespaced Issuer)
kind: ClusterIssuer
# This is optional since cert-manager will default to this value however
# if you are using an external issuer, change this to that issuer group.
group: cert-manager.io

View file

@@ -0,0 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ConfigMap-soju-4a44ac46db.yaml
- Deployment-gamja.yaml
- Deployment-soju.yaml
- Service-gamja.yaml
- Service-soju.yaml
- Service-soju-ws.yaml
- Ingress-irc.yaml

View file

@@ -0,0 +1,45 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: jellyseerr
namespace: jellyseerr
spec:
selector:
matchLabels:
app: jellyseerr
template:
metadata:
labels:
app: jellyseerr
spec:
containers:
- envFrom:
- secretRef:
name: jellyseerr
- configMapRef:
name: jellyseerr
image: git.gmem.ca/arch/jellyseerr:postgres
name: jellyseerr
ports:
- containerPort: 5055
name: http
resources:
limits:
cpu: '1'
memory: 512Mi
requests:
cpu: 500m
memory: 128Mi
volumeMounts:
- mountPath: /app/config/settings.json
name: config
subPath: settings.json
volumes:
- configMap:
name: jellyseerr
name: config

View file

@@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: le-issuer
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: jellyseerr
namespace: jellyseerr
spec:
rules:
- host: request-media.gmem.ca
http:
paths:
- backend:
service:
name: jellyseerr
port:
name: http
path: /
pathType: Prefix
tls:
- hosts:
- request-media.gmem.ca

View file

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: jellyseerr
namespace: jellyseerr
spec:
ports:
- name: http
port: 5055
targetPort: 5055
selector:
app: jellyseerr

View file

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- Deployment-jellyseerr.yaml
- Service-jellyseerr.yaml
- Ingress-jellyseerr.yaml

View file

@@ -0,0 +1,25 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- duplikate
- miniflux
- nitter
- piped
- searxng
- irc
- cloudflare
- jellyseerr
- librespeed
- metube
- prometheus
- redlib
- vrchat
- minecraft-invites
- tclip
- endpoints
- ingress-nginx
- homepage
- infisical
- nfs-subdir-external-provisioner
- misc
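
This is the root of the new YAML tree: every service directory above is a kustomization of its own, and the whole homelab renders in one pass from here instead of through a kubenix evaluation. A usage sketch (the repo-relative path is assumed):
# kustomize build apps --enable-helm > rendered.yaml
# kubectl diff -f rendered.yaml    # review the drift
# kubectl apply -f rendered.yaml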

View file

@@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: librespeed
namespace: librespeed
spec:
selector:
matchLabels:
app: librespeed
template:
metadata:
labels:
app: librespeed
spec:
containers:
- env:
- name: MODE
value: standalone
- name: PASSWORD
value: '123'
- name: TELEMETRY
value: 'true'
- name: WEBPORT
value: '8080'
image: git.gmem.ca/arch/librespeed:latest
imagePullPolicy: Always
name: librespeed
ports:
- containerPort: 8080
name: http
resources:
limits:
memory: 512Mi
requests:
cpu: 10m
memory: 30Mi
volumes:
- name: accounts
secret:
secretName: librespeed
- configMap:
name: librespeed
name: config

View file

@@ -0,0 +1,27 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: le-issuer
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
nginx.ingress.kubernetes.io/proxy-body-size: 1024m
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: librespeed
namespace: librespeed
spec:
rules:
- host: speed.gmem.ca
http:
paths:
- backend:
service:
name: librespeed
port:
name: http
path: /
pathType: Prefix
tls:
- hosts:
    - speed.gmem.ca

View file

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: librespeed
namespace: librespeed
spec:
ports:
- name: http
port: 8080
targetPort: 8080
selector:
app: librespeed

View file

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- Deployment-librespeed.yaml
- Service-librespeed.yaml
- Ingress-librespeed.yaml

View file

@@ -0,0 +1,26 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: metube
namespace: metube
spec:
selector:
matchLabels:
app: metube
template:
metadata:
labels:
app: metube
spec:
containers:
- image: ghcr.io/alexta69/metube
imagePullPolicy: Always
name: metube
ports:
- containerPort: 8081
name: http

View file

@@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: le-issuer
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: metube
namespace: metube
spec:
rules:
- host: metube.gmem.ca
http:
paths:
- backend:
service:
name: metube
port:
number: 8081
path: /
pathType: Prefix
tls:
- hosts:
- metube.gmem.ca

View file

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: metube
namespace: metube
spec:
ports:
- name: http
port: 8081
targetPort: 8081
selector:
app: metube

View file

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- Deployment-metube.yaml
- Service-metube.yaml
- Ingress-metube.yaml

View file

@@ -0,0 +1,32 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kubenix/k8s-version: '1.30'
kubenix/project-name: kubenix
labels:
kubenix/hash: e672eb08bf0db5ef675b3b6036ca047f43b4614f
name: whitelistmanager-frontend
namespace: minecraft-invites
spec:
selector:
matchLabels:
app: whitelistmanager-frontend
template:
metadata:
labels:
app: whitelistmanager-frontend
spec:
containers:
- image: git.gmem.ca/arch/whitelistmanager-frontend
name: whitelistmanager
ports:
- containerPort: 3000
name: http
resources:
limits:
cpu: '1'
memory: 512Mi
requests:
cpu: 1m
memory: 256Mi

Some files were not shown because too many files have changed in this diff.