infra: reconcile staging admin and runner manifests

This commit is contained in:
Seth Call 2026-03-13 17:11:34 -05:00
parent 1cbc690180
commit 8b0b8b0221
6 changed files with 174 additions and 61 deletions

View File

@ -8,7 +8,7 @@ metadata:
spec:
project: default
source:
repoURL: https://bitbucket.org/jamkazam/video-iac.git
repoURL: 'git@bitbucket.org:jamkazam/video-iac.git'
targetRevision: HEAD
path: k8s/jam-cloud
directory:

View File

@ -0,0 +1,24 @@
# ConfigMap holding the act_runner configuration file. It is mounted into
# the runner pod (CONFIG_FILE=/etc/act_runner/config.yaml per the runner
# Deployment elsewhere in this commit).
apiVersion: v1
kind: ConfigMap
metadata:
  name: act-runner-config
  namespace: jam-cloud-infra
data:
  config.yaml: |
    log:
      level: debug  # Increased to debug to catch the image pull error
    runner:
      capacity: 1
      timeout: 3h
      workdir_parent: "/data"
      labels:
        - "ubuntu-latest:docker://node:16-bullseye"
        - "ubuntu-22.04:docker://node:16-bullseye"
        - "dagger:docker://nixpkgs/nix:latest"
    container:
      network: ""
      privileged: true
      force_pull: true
      # Socket path is /var/run/docker/docker.sock (a directory shared with a
      # dind sidecar), not the usual /var/run/docker.sock -- TODO confirm it
      # matches the dockerd --host flag used by the dind container.
      options: "-v /var/run/docker/docker.sock:/var/run/docker/docker.sock -e DOCKER_HOST=unix:///var/run/docker/docker.sock"
      valid_volumes:
        - "**"

View File

@ -1,21 +1,15 @@
apiVersion: v1
kind: ConfigMap
kind: PersistentVolumeClaim
metadata:
name: act-runner-config
name: act-runner-data
namespace: jam-cloud-infra
data:
config.yaml: |
log:
level: info
runner:
capacity: 1
timeout: 3h
container:
network: ""
# Give the job container access to the Docker daemon so Dagger can spin up its engine
options: "-v /var/run/docker.sock:/var/run/docker.sock"
valid_volumes:
- "**"
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: linode-block-storage-retain
---
apiVersion: apps/v1
kind: Deployment
@ -26,6 +20,8 @@ metadata:
app: act-runner
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: act-runner
@ -34,9 +30,19 @@ spec:
labels:
app: act-runner
spec:
imagePullSecrets:
- name: gitea-registry
containers:
- name: runner
image: gitea/act_runner:latest
image: gitea/act_runner:0.3.0
workingDir: /data
resources:
requests:
cpu: 250m
memory: 512Mi
limits:
cpu: 2000m
memory: 8Gi
env:
- name: CONFIG_FILE
value: /etc/act_runner/config.yaml
@ -48,15 +54,29 @@ spec:
value: "k8s-runner"
- name: GITEA_RUNNER_LABELS
value: "ubuntu-latest:docker://node:16-bullseye,ubuntu-22.04:docker://node:16-bullseye,dagger:docker://nixpkgs/nix:latest"
- name: DOCKER_HOST
value: unix:///var/run/docker/docker.sock
- name: DOCKER_API_VERSION
value: "1.41"
securityContext:
privileged: true
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
mountPath: /var/run/docker
- name: config
mountPath: /etc/act_runner
- name: data
mountPath: /data
- name: dind
image: docker:23.0.5-dind
image: docker:27-dind
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 4000m
memory: 8Gi
command: ["dockerd", "--host=unix:///var/run/docker/docker.sock", "--tls=false", "--insecure-registry=gitea.jam-cloud-infra.svc.cluster.local"]
env:
- name: DOCKER_TLS_CERTDIR
value: ""
@ -64,10 +84,15 @@ spec:
privileged: true
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
mountPath: /var/run/docker
- name: data
mountPath: /data
volumes:
- name: docker-sock
emptyDir: {}
- name: data
persistentVolumeClaim:
claimName: act-runner-data
- name: config
configMap:
name: act-runner-config

View File

@ -0,0 +1,67 @@
# Egress TCP proxy: exposes an in-cluster Service (egress-proxy:5432) that
# HAProxy forwards to an external Postgres host, running on a dedicated
# gateway node via hostNetwork.
apiVersion: v1
kind: ConfigMap
metadata:
  name: egress-proxy-config
  namespace: jam-cloud-infra
data:
  haproxy.cfg: |
    defaults
      mode tcp
      timeout connect 5s
      timeout client 1m
      timeout server 1m
    listen postgres
      bind *:5432
      # Using hostname as requested
      server db int.jamkazam.com:5432
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: egress-proxy
  namespace: jam-cloud-infra
spec:
  replicas: 1
  selector:
    matchLabels:
      app: egress-proxy
  template:
    metadata:
      labels:
        app: egress-proxy
    spec:
      # hostNetwork so outbound connections originate from the gateway node's
      # own IP -- presumably that IP is allow-listed at the database; confirm.
      hostNetwork: true
      nodeSelector:
        role: egress-gateway
      tolerations:
        - key: "dedicated"
          operator: "Equal"
          value: "media"
          effect: "NoSchedule"
      containers:
        - name: haproxy
          # NOTE(review): "alpine" is an unpinned tag; consider pinning a
          # specific version for reproducible rollouts.
          image: haproxy:alpine
          ports:
            - containerPort: 5432
          volumeMounts:
            # subPath mount so only haproxy.cfg is overlaid, not the whole
            # /usr/local/etc/haproxy directory.
            - name: config
              mountPath: /usr/local/etc/haproxy/haproxy.cfg
              subPath: haproxy.cfg
      volumes:
        - name: config
          configMap:
            name: egress-proxy-config
---
apiVersion: v1
kind: Service
metadata:
  name: egress-proxy
  namespace: jam-cloud-infra
spec:
  selector:
    app: egress-proxy
  ports:
    - protocol: TCP
      port: 5432
      targetPort: 5432

View File

@ -8,7 +8,7 @@ spec:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
storage: 50Gi
storageClassName: linode-block-storage-retain
---
apiVersion: v1
@ -25,6 +25,7 @@ data:
[repository]
ROOT = /data/git/repositories
ALLOWED_SCHEMES = http,https,ssh,git
DEFAULT_REPO_UNITS = repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.actions
[repository.local]
LOCAL_COPY_PATH = /data/gitea/tmp/local-repo
@ -109,6 +110,8 @@ metadata:
app: gitea
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: gitea
@ -168,7 +171,7 @@ metadata:
namespace: jam-cloud-infra
annotations:
cert-manager.io/cluster-issuer: letsencrypt-nginx-production
nginx.ingress.kubernetes.io/proxy-body-size: "512m"
nginx.ingress.kubernetes.io/proxy-body-size: "2048m"
spec:
ingressClassName: nginx
tls:

View File

@ -1,8 +1,3 @@
apiVersion: v1
kind: Namespace
metadata:
name: jam-cloud
---
apiVersion: apps/v1
kind: Deployment
metadata:
@ -22,13 +17,19 @@ spec:
- name: gitea-registry
containers:
- name: web
# This will be replaced by the Dagger build pipeline on first run
image: git.staging.jamkazam.com/seth/jam-cloud-admin:1773165989
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3000
readinessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
env:
- name: RAILS_ENV
value: "production"
value: production
- name: RAILS_LOG_TO_STDOUT
value: "true"
- name: RAILS_SERVE_STATIC_FILES
@ -36,27 +37,21 @@ spec:
- name: SECRET_KEY_BASE
value: "a7b8c9d0e1f2g3h4i5j6k7l8m9n0o1p2q3r4s5t6u7v8w9x0y1z2a3b4c5d6e7f8"
- name: DATABASE_URL
value: "postgres://jam:jam@72.14.176.182:5432/jam"
value: postgres://jam:jam@72.14.176.182:5432/jam
- name: AWS_KEY
value: "AKIAJAXEHQBDOZ5WAWKA"
value: AKIAJAXEHQBDOZ5WAWKA
- name: AWS_SECRET
value: "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"
value: DSu5p7qMrtZx6KqlkaC1/lqUQdFpEFu27lZ/SRz8
- name: AWS_ACCESS_KEY_ID
value: "AKIAJAXEHQBDOZ5WAWKA"
value: AKIAJAXEHQBDOZ5WAWKA
- name: AWS_SECRET_ACCESS_KEY
value: "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"
value: DSu5p7qMrtZx6KqlkaC1/lqUQdFpEFu27lZ/SRz8
- name: AWS_REGION
value: "us-east-1"
value: us-east-1
- name: AWS_BUCKET
value: "jamkazam-staging"
value: jamkazam-staging
- name: AWS_BUCKET_PUBLIC
value: "jamkazam-staging-public"
readinessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
value: jamkazam-staging-public
---
apiVersion: v1
kind: Service
@ -64,12 +59,11 @@ metadata:
name: admin
namespace: jam-cloud
spec:
ports:
- port: 80
targetPort: 3000
selector:
app: admin
ports:
- protocol: TCP
port: 80
targetPort: 3000
---
apiVersion: networking.k8s.io/v1
kind: Ingress
@ -77,21 +71,21 @@ metadata:
name: admin
namespace: jam-cloud
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-nginx-production
spec:
ingressClassName: nginx
tls:
- secretName: admin-tls
hosts:
- admin.staging.jamkazam.com
- hosts:
- admin.staging.jamkazam.com
secretName: admin-tls
rules:
- host: admin.staging.jamkazam.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: admin
port:
number: 80
- host: admin.staging.jamkazam.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: admin
port:
number: 80