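# PieFed background (Celery) worker: a Deployment gated on database migrations
# via an init container, plus a HorizontalPodAutoscaler for the worker.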
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: piefed-worker
  namespace: piefed-application
  labels:
    app.kubernetes.io/name: piefed
    app.kubernetes.io/component: worker
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: piefed
      app.kubernetes.io/component: worker
  template:
    metadata:
      labels:
        app.kubernetes.io/name: piefed
        app.kubernetes.io/component: worker
    spec:
      serviceAccountName: piefed-init-checker
      imagePullSecrets:
        - name: harbor-pull-secret
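      # Gate the worker on the database migration Job (piefed-db-init) so Celery
      # tasks never run against an unmigrated schema.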
      initContainers:
        - name: wait-for-migrations
          image: bitnami/kubectl@sha256:b407dcce69129c06fabab6c3eb35bf9a2d75a20d0d927b3f32dae961dba4270b
          command:
            - sh
            - -c
            - |
              echo "Checking database migration status..."

              # Check if Job exists
              if ! kubectl get job piefed-db-init -n piefed-application >/dev/null 2>&1; then
                echo "ERROR: Migration job does not exist!"
                echo "Expected job/piefed-db-init in piefed-application namespace"
                exit 1
              fi

              # Check if Job is complete
              COMPLETE_STATUS=$(kubectl get job piefed-db-init -n piefed-application -o jsonpath='{.status.conditions[?(@.type=="Complete")].status}' 2>/dev/null)
              if [ "$COMPLETE_STATUS" = "True" ]; then
                echo "✓ Migrations already complete, proceeding..."
                exit 0
              fi

              # Check if Job has failed
              FAILED_STATUS=$(kubectl get job piefed-db-init -n piefed-application -o jsonpath='{.status.conditions[?(@.type=="Failed")].status}' 2>/dev/null)
              if [ "$FAILED_STATUS" = "True" ]; then
                echo "ERROR: Migration job has FAILED!"
                echo "Job status:"
                kubectl get job piefed-db-init -n piefed-application -o jsonpath='{.status.conditions[?(@.type=="Failed")]}' | jq .
                echo ""
                echo "Recent events:"
                kubectl get events -n piefed-application --field-selector involvedObject.name=piefed-db-init --sort-by='.lastTimestamp' | tail -5
                exit 1
              fi

              # Job exists but is still running, wait for it
              echo "Migration job running, waiting for completion..."
              kubectl wait --for=condition=complete --timeout=600s job/piefed-db-init -n piefed-application || {
                echo "ERROR: Migration job failed or timed out!"
                exit 1
              }

              echo "✓ Migrations complete, starting worker pod..."
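      # NOTE: the kubectl calls above require the piefed-init-checker ServiceAccount
      # to have RBAC permissions to get Jobs (and read events) in this namespace;
      # that Role/RoleBinding is assumed to be defined alongside the migration Job.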
      containers:
        - name: piefed-worker
          image: <YOUR_REGISTRY_URL>/library/piefed-worker:latest
          imagePullPolicy: Always
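          # piefed-config (ConfigMap) and piefed-secrets (Secret) are expected to
          # exist in this namespace; the container will not start without them.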
          envFrom:
            - configMapRef:
                name: piefed-config
            - secretRef:
                name: piefed-secrets
          env:
            - name: PYTHONUNBUFFERED
              value: "1"
            - name: FLASK_DEBUG
              value: "0" # Keep production mode but enable better logging
            - name: WERKZEUG_DEBUG_PIN
              value: "off"
            # Celery worker logging configuration
            - name: CELERY_WORKER_HIJACK_ROOT_LOGGER
              value: "False"
            # Database connection pool overrides for worker (lower than web pods)
            - name: DB_POOL_SIZE
              value: "5" # Workers need fewer connections than web pods
            - name: DB_MAX_OVERFLOW
              value: "10" # Lower overflow for background tasks
          resources:
            requests:
              cpu: 500m
              memory: 1Gi
            limits:
              cpu: 2000m # Allow internal scaling to 5 workers
              memory: 3Gi # Increased for multiple workers
          volumeMounts:
            - name: app-storage
              mountPath: /app/app/media
              subPath: media
            - name: app-storage
              mountPath: /app/app/static/media
              subPath: static
            - name: cache-storage
              mountPath: /app/cache
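          # Both probes simply ping the Celery broker parsed from CELERY_BROKER_URL.
          # This assumes a broker URL of the form redis://[:password@]host:port/db and
          # that the redis Python package is installed in the worker image; it checks
          # broker connectivity, not that the worker is actually consuming tasks.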
          livenessProbe:
            exec:
              command:
                - python
                - -c
                - "import os,redis,urllib.parse; u=urllib.parse.urlparse(os.environ['CELERY_BROKER_URL']); r=redis.Redis(host=u.hostname, port=u.port, password=u.password, db=int(u.path[1:]) if u.path else 0); r.ping()"
            initialDelaySeconds: 60
            periodSeconds: 60
            timeoutSeconds: 10
          readinessProbe:
            exec:
              command:
                - python
                - -c
                - "import os,redis,urllib.parse; u=urllib.parse.urlparse(os.environ['CELERY_BROKER_URL']); r=redis.Redis(host=u.hostname, port=u.port, password=u.password, db=int(u.path[1:]) if u.path else 0); r.ping()"
            initialDelaySeconds: 30
            periodSeconds: 30
            timeoutSeconds: 5
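      # app-storage backs both the media and static mounts via subPath. If the same
      # PVC is also mounted by the web pods, it needs a storage class that supports
      # ReadWriteMany (assumption; the PVCs themselves are not defined in this file).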
      volumes:
        - name: app-storage
          persistentVolumeClaim:
            claimName: piefed-app-storage
        - name: cache-storage
          persistentVolumeClaim:
            claimName: piefed-cache-storage
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: piefed-worker-hpa
  namespace: piefed-application
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: piefed-worker
  minReplicas: 1
  maxReplicas: 2
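  # averageUtilization is measured against the pod's *requests*, not its limits:
  # 375% of the 500m CPU request ≈ 1875m (just under the 2000m limit) and 250% of
  # the 1Gi memory request = 2.5Gi, so a second replica is only added once the
  # single worker is close to its own ceiling.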
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 375
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 250
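
# Example post-deploy checks (a sketch; resource names come from the manifests above):
#   kubectl -n piefed-application rollout status deployment/piefed-worker
#   kubectl -n piefed-application get hpa piefed-worker-hpa
#   kubectl -n piefed-application logs deploy/piefed-worker -c piefed-worker --tail=50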