Remove custom setup.

This commit is contained in:
2025-06-24 17:50:43 -07:00
parent f1fe4f9cc2
commit 36196f5095
43 changed files with 0 additions and 11769 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -1,19 +0,0 @@
---
# Wildcard TLS certificate for *.internal.cloud2.payne.io, issued by the
# letsencrypt-prod ClusterIssuer and stored in cert-manager's namespace.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: wildcard-internal-wild-cloud
  namespace: cert-manager
spec:
  secretName: wildcard-internal-wild-cloud-tls
  dnsNames:
    - "*.internal.cloud2.payne.io"
    - "internal.cloud2.payne.io"
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
  duration: 2160h # 90 days
  renewBefore: 360h # 15 days
  privateKey:
    algorithm: RSA
    size: 2048

View File

@@ -1,12 +0,0 @@
# Kustomization for the cert-manager issuers and certificates.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - letsencrypt-staging-dns01.yaml
  - letsencrypt-prod-dns01.yaml
  - internal-wildcard-certificate.yaml
  - wildcard-certificate.yaml
# Note: cert-manager.yaml contains the main installation manifests
# but is applied separately via URL in the install script

View File

@@ -1,26 +0,0 @@
---
# Production Let's Encrypt issuer: DNS-01 (Cloudflare) for wildcard certs,
# HTTP-01 (Traefik ingress) for everything else.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    email: paul@payne.io
    privateKeySecretRef:
      name: letsencrypt-prod
    server: https://acme-v02.api.letsencrypt.org/directory
    solvers:
      # DNS-01 solver for wildcard certificates
      - dns01:
          cloudflare:
            email: paul@payne.io
            apiTokenSecretRef:
              name: cloudflare-api-token
              key: api-token
        selector:
          dnsZones:
            - "payne.io"
      # Keep the HTTP-01 solver for non-wildcard certificates
      - http01:
          ingress:
            class: traefik

View File

@@ -1,26 +0,0 @@
---
# Staging Let's Encrypt issuer (untrusted certs, higher rate limits) —
# same solver layout as the production issuer.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    email: paul@payne.io
    privateKeySecretRef:
      name: letsencrypt-staging
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    solvers:
      # DNS-01 solver for wildcard certificates
      - dns01:
          cloudflare:
            email: paul@payne.io
            apiTokenSecretRef:
              name: cloudflare-api-token
              key: api-token
        selector:
          dnsZones:
            - "payne.io"
      # Keep the HTTP-01 solver for non-wildcard certificates
      - http01:
          ingress:
            class: traefik

View File

@@ -1,4 +0,0 @@
# Namespace holding the cert-manager installation and its certificates.
apiVersion: v1
kind: Namespace
metadata:
  name: cert-manager

View File

@@ -1,19 +0,0 @@
---
# Wildcard TLS certificate for *.cloud2.payne.io (public-facing domain),
# issued by the letsencrypt-prod ClusterIssuer.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: wildcard-wild-cloud
  namespace: cert-manager
spec:
  secretName: wildcard-wild-cloud-tls
  dnsNames:
    - "*.cloud2.payne.io"
    - "cloud2.payne.io"
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
  duration: 2160h # 90 days
  renewBefore: 360h # 15 days
  privateKey:
    algorithm: RSA
    size: 2048

View File

@@ -1,28 +0,0 @@
---
# CoreDNS customization: resolve *.internal.cloud2.payne.io to the cluster
# proxy IP and forward all other queries to Cloudflare (1.1.1.1).
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns-custom
  namespace: kube-system
data:
  # Custom server block for internal domains. All internal domains should
  # resolve to the cluster proxy.
  internal.server: |
    internal.cloud2.payne.io {
        errors
        cache 30
        reload
        template IN A {
            match (.*)\.internal\.cloud2\.payne\.io\.
            answer "{{ .Name }} 60 IN A 192.168.8.20"
        }
        template IN AAAA {
            match (.*)\.internal\.cloud2\.payne\.io\.
            rcode NXDOMAIN
        }
    }
  # Custom override to set external resolvers.
  external.override: |
    forward . 1.1.1.1 {
        max_concurrent 1000
    }

View File

@@ -1,25 +0,0 @@
---
# Exposes kube-dns (CoreDNS) on a stable MetalLB LoadBalancer IP so hosts
# outside the cluster can resolve internal names.
apiVersion: v1
kind: Service
metadata:
  name: coredns-lb
  namespace: kube-system
  annotations:
    metallb.universe.tf/loadBalancerIPs: "192.168.8.20"
spec:
  type: LoadBalancer
  ports:
    - name: dns
      port: 53
      protocol: UDP
      targetPort: 53
    - name: dns-tcp
      port: 53
      protocol: TCP
      targetPort: 53
    - name: metrics
      port: 9153
      protocol: TCP
      targetPort: 9153
  selector:
    k8s-app: kube-dns

View File

@@ -1,36 +0,0 @@
---
# Single-replica Docker registry backed by a persistent volume claim.
# maxSurge=0/maxUnavailable=1 because the PVC is ReadWriteOnce — the old
# pod must release the volume before the new one starts.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: docker-registry
  labels:
    app: docker-registry
spec:
  replicas: 1
  selector:
    matchLabels:
      app: docker-registry
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: docker-registry
    spec:
      containers:
        - image: registry:3.0.0
          name: docker-registry
          ports:
            - containerPort: 5000
              protocol: TCP
          volumeMounts:
            - mountPath: /var/lib/registry
              name: docker-registry-storage
              readOnly: false
      volumes:
        - name: docker-registry-storage
          persistentVolumeClaim:
            claimName: docker-registry-pvc

View File

@@ -1,20 +0,0 @@
# Internal ingress for the registry, terminating TLS with the internal
# wildcard certificate.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: docker-registry
spec:
  rules:
    - host: docker-registry.internal.cloud2.payne.io
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: docker-registry
                port:
                  number: 5000
  tls:
    - hosts:
        - docker-registry.internal.cloud2.payne.io
      secretName: wildcard-internal-wild-cloud-tls

View File

@@ -1,14 +0,0 @@
# Kustomization for the docker-registry app; common labels are applied to
# resources and (via includeSelectors) to selectors as well.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: docker-registry
labels:
  - includeSelectors: true
    pairs:
      app: docker-registry
      managedBy: wild-cloud
resources:
  - deployment.yaml
  - ingress.yaml
  - service.yaml
  - namespace.yaml
  - pvc.yaml

View File

@@ -1,4 +0,0 @@
# Namespace for the private Docker registry.
apiVersion: v1
kind: Namespace
metadata:
  name: docker-registry

View File

@@ -1,12 +0,0 @@
# Longhorn-backed storage for registry image layers.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: docker-registry-pvc
spec:
  storageClassName: longhorn
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 10Gi

View File

@@ -1,13 +0,0 @@
---
# ClusterIP service fronting the registry pods on port 5000.
apiVersion: v1
kind: Service
metadata:
  name: docker-registry
  labels:
    app: docker-registry
spec:
  ports:
    - port: 5000
      targetPort: 5000
  selector:
    app: docker-registry

View File

@@ -1,39 +0,0 @@
---
# CloudFlare provider for ExternalDNS
# Watches Services and Ingresses and publishes matching payne.io records
# to Cloudflare; TXT ownership records are tagged with the cluster id.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: external-dns
  namespace: externaldns
spec:
  selector:
    matchLabels:
      app: external-dns
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: external-dns
    spec:
      serviceAccountName: external-dns
      containers:
        - name: external-dns
          image: registry.k8s.io/external-dns/external-dns:v0.13.4
          args:
            - --source=service
            - --source=ingress
            - --txt-owner-id=cloud-payne-io-cluster
            - --provider=cloudflare
            - --domain-filter=payne.io
            #- --exclude-domains=internal.${DOMAIN}
            - --cloudflare-dns-records-per-page=5000
            - --publish-internal-services
            - --no-cloudflare-proxied
            - --log-level=debug
          env:
            - name: CF_API_TOKEN
              valueFrom:
                secretKeyRef:
                  name: cloudflare-api-token
                  key: api-token

View File

@@ -1,35 +0,0 @@
---
# Common RBAC resources for all ExternalDNS deployments
apiVersion: v1
kind: ServiceAccount
metadata:
  name: external-dns
  namespace: externaldns
---
# Read access to the resources ExternalDNS watches for DNS sources.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: external-dns
rules:
  - apiGroups: [""]
    resources: ["services", "endpoints", "pods"]
    verbs: ["get", "watch", "list"]
  - apiGroups: ["extensions", "networking.k8s.io"]
    resources: ["ingresses"]
    verbs: ["get", "watch", "list"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: external-dns-viewer
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: external-dns
subjects:
  - kind: ServiceAccount
    name: external-dns
    namespace: externaldns

View File

@@ -1,7 +0,0 @@
# Kustomization for the ExternalDNS deployment.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - externaldns-rbac.yaml
  - externaldns-cloudflare.yaml

View File

@@ -1,4 +0,0 @@
# Namespace for ExternalDNS.
apiVersion: v1
kind: Namespace
metadata:
  name: externaldns

View File

@@ -1,32 +0,0 @@
---
# Service Account and RBAC for Dashboard admin access
# NOTE(review): this grants full cluster-admin to the dashboard account —
# confirm that level of access is intended.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kubernetes-dashboard
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
# Token for dashboard-admin
apiVersion: v1
kind: Secret
metadata:
  name: dashboard-admin-token
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: dashboard-admin
type: kubernetes.io/service-account-token

View File

@@ -1,84 +0,0 @@
---
# Internal-only middleware
# NOTE(review): Traefik v3 renamed ipWhiteList to ipAllowList — verify the
# deployed Traefik version still honors this field.
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: internal-only
  namespace: kubernetes-dashboard
spec:
  ipWhiteList:
    # Restrict to local private network ranges
    sourceRange:
      - 127.0.0.1/32 # localhost
      - 10.0.0.0/8 # Private network
      - 172.16.0.0/12 # Private network
      - 192.168.0.0/16 # Private network
---
# HTTPS redirect middleware
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: dashboard-redirect-scheme
  namespace: kubernetes-dashboard
spec:
  redirectScheme:
    scheme: https
    permanent: true
---
# IngressRoute for Dashboard
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: kubernetes-dashboard-https
  namespace: kubernetes-dashboard
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`dashboard.internal.cloud2.payne.io`)
      kind: Rule
      middlewares:
        - name: internal-only
          namespace: kubernetes-dashboard
      services:
        - name: kubernetes-dashboard
          port: 443
          serversTransport: dashboard-transport
  tls:
    secretName: wildcard-internal-wild-cloud-tls
---
# HTTP to HTTPS redirect.
# FIXME: Is this needed?
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: kubernetes-dashboard-http
  namespace: kubernetes-dashboard
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`dashboard.internal.cloud2.payne.io`)
      kind: Rule
      middlewares:
        - name: dashboard-redirect-scheme
          namespace: kubernetes-dashboard
      services:
        - name: kubernetes-dashboard
          port: 443
          serversTransport: dashboard-transport
---
# ServersTransport for HTTPS backend with skip verify.
# FIXME: Is this needed?
apiVersion: traefik.io/v1alpha1
kind: ServersTransport
metadata:
  name: dashboard-transport
  namespace: kubernetes-dashboard
spec:
  insecureSkipVerify: true
  serverName: dashboard.internal.cloud2.payne.io

View File

@@ -1,6 +0,0 @@
# Kustomization for the Kubernetes Dashboard add-ons.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - dashboard-admin-rbac.yaml
  - dashboard-kube-system.yaml

View File

@@ -1,5 +0,0 @@
# Kustomization for the Longhorn storage installation.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - longhorn.yaml

File diff suppressed because it is too large Load Diff

View File

@@ -1,3 +0,0 @@
# Kustomization for the MetalLB address-pool configuration.
# apiVersion/kind added: kustomize deprecates files without them.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: metallb-system
resources:
  - pool.yaml

View File

@@ -1,19 +0,0 @@
---
# MetalLB IP pool (192.168.8.20-29) advertised on the local L2 segment.
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
    - 192.168.8.20-192.168.8.29
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: l2-advertisement
  namespace: metallb-system
spec:
  ipAddressPools:
    - first-pool

View File

@@ -1,3 +0,0 @@
# Kustomization pulling the upstream MetalLB native manifests at v0.15.0.
# apiVersion/kind added: kustomize deprecates files without them.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: metallb-system
resources:
  - github.com/metallb/metallb/config/native?ref=v0.15.0

View File

@@ -1,6 +0,0 @@
# Kustomization for the NFS media storage resources.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - persistent-volume.yaml
  - storage-class.yaml

View File

@@ -1,23 +0,0 @@
# Statically provisioned NFS volume exporting /data/media from host box-01.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-media-pv
  labels:
    storage: nfs-media
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    server: box-01
    path: /data/media
  mountOptions:
    - nfsvers=4.1
    - rsize=1048576
    - wsize=1048576
    - hard
    - intr
    - timeo=600

View File

@@ -1,10 +0,0 @@
# StorageClass marker for the statically provisioned NFS volume.
# NOTE(review): provisioner "nfs" is not a real dynamic provisioner —
# presumably this class only tags static PVs; confirm no dynamic PVCs use it.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs
provisioner: nfs
parameters:
  server: box-01
  path: /data/media
reclaimPolicy: Retain
allowVolumeExpansion: true

View File

@@ -1,13 +0,0 @@
# Middleware restricting routes to private-network clients.
# apiVersion updated from deprecated traefik.containo.us to traefik.io,
# matching the group used by the other Traefik CRDs in this setup
# (Traefik v3 only serves the traefik.io group).
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: internal-only
  namespace: kube-system
spec:
  ipWhiteList:
    # Restrict to local private network ranges - adjust these to match your network
    sourceRange:
      - 127.0.0.1/32 # localhost
      - 10.0.0.0/8 # Private network
      - 172.16.0.0/12 # Private network
      - 192.168.0.0/16 # Private network

View File

@@ -1,13 +0,0 @@
# Kustomization assembling the rendered Traefik Helm templates.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - templates/deployment.yaml
  - templates/gatewayclass.yaml
  - templates/gateway.yaml
  - templates/ingressclass.yaml
  - templates/ingressroute.yaml
  - templates/rbac/clusterrolebinding.yaml
  - templates/rbac/clusterrole.yaml
  - templates/rbac/serviceaccount.yaml
  - templates/service.yaml

View File

@@ -1,4 +0,0 @@
# Namespace for the Traefik ingress controller.
apiVersion: v1
kind: Namespace
metadata:
  name: traefik

View File

@@ -1,130 +0,0 @@
---
# Source: traefik/templates/deployment.yaml
# Rendered from the traefik-36.1.0 Helm chart. Empty maps ({}) mark fields
# the chart emitted with no values.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: traefik
  namespace: traefik
  labels:
    app.kubernetes.io/name: traefik
    app.kubernetes.io/instance: traefik-traefik
    helm.sh/chart: traefik-36.1.0
    app.kubernetes.io/managed-by: Helm
  annotations: {}
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: traefik
      app.kubernetes.io/instance: traefik-traefik
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 0
      maxSurge: 1
  minReadySeconds: 0
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/path: "/metrics"
        prometheus.io/port: "9100"
      labels:
        app.kubernetes.io/name: traefik
        app.kubernetes.io/instance: traefik-traefik
        helm.sh/chart: traefik-36.1.0
        app.kubernetes.io/managed-by: Helm
    spec:
      serviceAccountName: traefik
      automountServiceAccountToken: true
      terminationGracePeriodSeconds: 60
      hostNetwork: false
      containers:
        - image: docker.io/traefik:v3.4.1
          imagePullPolicy: IfNotPresent
          name: traefik
          resources: {}
          readinessProbe:
            httpGet:
              path: /ping
              port: 8080
              scheme: HTTP
            failureThreshold: 1
            initialDelaySeconds: 2
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 2
          livenessProbe:
            httpGet:
              path: /ping
              port: 8080
              scheme: HTTP
            failureThreshold: 3
            initialDelaySeconds: 2
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 2
          lifecycle: {}
          ports:
            - name: metrics
              containerPort: 9100
              protocol: TCP
            - name: traefik
              containerPort: 8080
              protocol: TCP
            - name: web
              containerPort: 8000
              protocol: TCP
            - name: websecure
              containerPort: 8443
              protocol: TCP
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            readOnlyRootFilesystem: true
          volumeMounts:
            - name: data
              mountPath: /data
            - name: tmp
              mountPath: /tmp
          args:
            - "--global.checkNewVersion"
            - "--entryPoints.metrics.address=:9100/tcp"
            - "--entryPoints.traefik.address=:8080/tcp"
            - "--entryPoints.web.address=:8000/tcp"
            - "--entryPoints.websecure.address=:8443/tcp"
            - "--api.dashboard=true"
            - "--ping=true"
            - "--metrics.prometheus=true"
            - "--metrics.prometheus.entrypoint=metrics"
            - "--providers.kubernetescrd"
            - "--providers.kubernetescrd.allowEmptyServices=true"
            - "--providers.kubernetesingress"
            - "--providers.kubernetesingress.allowEmptyServices=true"
            - "--providers.kubernetesingress.ingressendpoint.publishedservice=traefik/traefik"
            - "--providers.kubernetesgateway"
            - "--providers.kubernetesgateway.statusaddress.service.name=traefik"
            - "--providers.kubernetesgateway.statusaddress.service.namespace=traefik"
            - "--entryPoints.websecure.http.tls=true"
            - "--log.level=INFO"
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      volumes:
        - name: data
          emptyDir: {}
        - name: tmp
          emptyDir: {}
      securityContext:
        runAsGroup: 65532
        runAsNonRoot: true
        runAsUser: 65532

View File

@@ -1,18 +0,0 @@
---
# Source: traefik/templates/gateway.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: traefik-gateway
  namespace: traefik
  labels:
    app.kubernetes.io/name: traefik
    app.kubernetes.io/instance: traefik-traefik
    helm.sh/chart: traefik-36.1.0
    app.kubernetes.io/managed-by: Helm
spec:
  gatewayClassName: traefik
  listeners:
    - name: web
      port: 8000
      protocol: HTTP

View File

@@ -1,13 +0,0 @@
---
# Source: traefik/templates/gatewayclass.yaml
apiVersion: gateway.networking.k8s.io/v1
kind: GatewayClass
metadata:
  name: traefik
  labels:
    app.kubernetes.io/name: traefik
    app.kubernetes.io/instance: traefik-traefik
    helm.sh/chart: traefik-36.1.0
    app.kubernetes.io/managed-by: Helm
spec:
  controllerName: traefik.io/gateway-controller

View File

@@ -1,15 +0,0 @@
---
# Source: traefik/templates/ingressclass.yaml
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
  labels:
    app.kubernetes.io/name: traefik
    app.kubernetes.io/instance: traefik-traefik
    helm.sh/chart: traefik-36.1.0
    app.kubernetes.io/managed-by: Helm
  name: traefik
spec:
  controller: traefik.io/ingress-controller

View File

@@ -1,21 +0,0 @@
---
# Source: traefik/templates/ingressroute.yaml
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: traefik-dashboard
  namespace: traefik
  labels:
    app.kubernetes.io/name: traefik
    app.kubernetes.io/instance: traefik-traefik
    helm.sh/chart: traefik-36.1.0
    app.kubernetes.io/managed-by: Helm
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`dashboard.localhost`)
      kind: Rule
      services:
        - kind: TraefikService
          name: api@internal

View File

@@ -1,108 +0,0 @@
---
# Source: traefik/templates/rbac/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-traefik
  labels:
    app.kubernetes.io/name: traefik
    app.kubernetes.io/instance: traefik-traefik
    helm.sh/chart: traefik-36.1.0
    app.kubernetes.io/managed-by: Helm
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - nodes
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
      - networking.k8s.io
    resources:
      - ingressclasses
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - traefik.io
    resources:
      - ingressroutes
      - ingressroutetcps
      - ingressrouteudps
      - middlewares
      - middlewaretcps
      - serverstransports
      - serverstransporttcps
      - tlsoptions
      - tlsstores
      - traefikservices
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - namespaces
      - secrets
      - configmaps
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - gateway.networking.k8s.io
    resources:
      - backendtlspolicies
      - gatewayclasses
      - gateways
      - grpcroutes
      - httproutes
      - referencegrants
      - tcproutes
      - tlsroutes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - gateway.networking.k8s.io
    resources:
      - backendtlspolicies/status
      - gatewayclasses/status
      - gateways/status
      - grpcroutes/status
      - httproutes/status
      - tcproutes/status
      - tlsroutes/status
    verbs:
      - update

View File

@@ -1,19 +0,0 @@
---
# Source: traefik/templates/rbac/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-traefik
  labels:
    app.kubernetes.io/name: traefik
    app.kubernetes.io/instance: traefik-traefik
    helm.sh/chart: traefik-36.1.0
    app.kubernetes.io/managed-by: Helm
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-traefik
subjects:
  - kind: ServiceAccount
    name: traefik
    namespace: traefik

View File

@@ -1,14 +0,0 @@
---
# Source: traefik/templates/rbac/serviceaccount.yaml
# Token auto-mount is disabled here; the Deployment opts back in explicitly.
kind: ServiceAccount
apiVersion: v1
metadata:
  name: traefik
  namespace: traefik
  labels:
    app.kubernetes.io/name: traefik
    app.kubernetes.io/instance: traefik-traefik
    helm.sh/chart: traefik-36.1.0
    app.kubernetes.io/managed-by: Helm
  annotations: {}
automountServiceAccountToken: false

View File

@@ -1,27 +0,0 @@
---
# Source: traefik/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: traefik
  namespace: traefik
  labels:
    app.kubernetes.io/name: traefik
    app.kubernetes.io/instance: traefik-traefik
    helm.sh/chart: traefik-36.1.0
    app.kubernetes.io/managed-by: Helm
  annotations: {}
spec:
  type: LoadBalancer
  selector:
    app.kubernetes.io/name: traefik
    app.kubernetes.io/instance: traefik-traefik
  ports:
    - port: 80
      name: web
      targetPort: web
      protocol: TCP
    - port: 443
      name: websecure
      targetPort: websecure
      protocol: TCP

View File

@@ -1,28 +0,0 @@
---
# Traefik service configuration with static LoadBalancer IP
# Template fix: the original read `{ { .cluster.loadBalancerIP } }`, which
# the template engine would never expand; the value is now a properly
# quoted `{{ ... }}` expression.
apiVersion: v1
kind: Service
metadata:
  name: traefik
  namespace: kube-system
  annotations:
    # Get a stable IP from MetalLB
    metallb.universe.tf/address-pool: production
    metallb.universe.tf/allow-shared-ip: traefik-lb
  labels:
    app.kubernetes.io/instance: traefik-kube-system
    app.kubernetes.io/name: traefik
spec:
  type: LoadBalancer
  loadBalancerIP: "{{ .cluster.loadBalancerIP }}"
  selector:
    app.kubernetes.io/instance: traefik-kube-system
    app.kubernetes.io/name: traefik
  ports:
    - name: web
      port: 80
      targetPort: web
    - name: websecure
      port: 443
      targetPort: websecure
  externalTrafficPolicy: Local