remove chown of the media directory so that fsGroup in the pod securityContext can manage volume permissions

2026-01-14 22:59:28 +02:00
parent 43fc195249
commit 6bef6b3b6a
3 changed files with 236 additions and 229 deletions
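
The change replaces build-time ownership of the volume-mounted media directory with Kubernetes' pod-level fsGroup. When fsGroup is set in a pod's securityContext, the kubelet recursively applies that GID (plus the setgid bit) to each mounted volume before the containers start, so files created at runtime stay group-writable without any chown baked into the image. A minimal sketch of the mechanism, using a hypothetical standalone demo pod rather than anything from this commit:

    apiVersion: v1
    kind: Pod
    metadata:
      name: fsgroup-demo                       # hypothetical pod, for illustration only
    spec:
      securityContext:
        fsGroup: 1000                          # kubelet chgrps every mounted volume to GID 1000
        fsGroupChangePolicy: OnRootMismatch    # optional: skip the recursive walk if the root already matches
      containers:
        - name: app
          image: alpine:3.20
          # id shows the supplementary GID 1000; /data is group-owned by 1000 with the setgid bit
          command: ["sh", "-c", "id && ls -ld /data && sleep 3600"]
          volumeMounts:
            - name: data
              mountPath: /data
      volumes:
        - name: data
          emptyDir: {}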

View File

@@ -60,11 +60,14 @@ RUN apk add --no-cache \
     bash
 
 # Create piefed user and set up directories in a single layer
+# Note: /app/app/static/media is volume-mounted in K8s, fsGroup handles permissions there
+# Other directories need explicit ownership for logging and temp files
 RUN addgroup -g 1000 piefed \
     && adduser -u 1000 -G piefed -s /bin/sh -D piefed \
     && mkdir -p /app/logs /app/app/static/tmp /app/app/static/media \
                 /var/log/piefed /var/run/piefed \
-    && chown -R piefed:piefed /var/log/piefed /var/run/piefed
+    && chown -R piefed:piefed /app/logs /app/app/static/tmp \
+                              /var/log/piefed /var/run/piefed
 
 # Set working directory
 WORKDIR /app
@@ -75,7 +78,7 @@ COPY --from=builder --chown=piefed:piefed /app /app
 # Compile translations and set permissions in a single layer
 RUN source /app/venv/bin/activate \
     && (pybabel compile -d app/translations || true) \
-    && chmod 755 /app/logs /app/app/static/tmp /app/app/static/media
+    && chmod 755 /app/logs /app/app/static/tmp
 
 # Copy shared entrypoint utilities
 COPY entrypoint-common.sh /usr/local/bin/entrypoint-common.sh
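
The split in the hunk above follows from where fsGroup can and cannot reach: /app/logs and /app/app/static/tmp live on the image's writable layer, which fsGroup never touches, so they keep an explicit chown, while /app/app/static/media is shadowed by a volume mount at runtime, so build-time ownership of that path is invisible to the pod. The relevant mount from the web deployment below shows the shadowing:

    # Excerpt from the web deployment in this commit: the PVC subPath is mounted
    # over the directory the Dockerfile created, so the pod sees the volume's
    # fsGroup-managed ownership, not whatever the image set at build time.
    volumeMounts:
      - name: app-storage
        mountPath: /app/app/static/media   # shadows the image directory
        subPath: static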

View File

@@ -20,107 +20,109 @@ spec:
         app.kubernetes.io/component: web
     spec:
       serviceAccountName: piefed-init-checker
+      securityContext:
+        fsGroup: 1000 # piefed group - ensures volume mounts are writable
       imagePullSecrets:
         - name: harbor-pull-secret
       initContainers:
         - name: wait-for-migrations
           image: bitnami/kubectl@sha256:b407dcce69129c06fabab6c3eb35bf9a2d75a20d0d927b3f32dae961dba4270b
           command:
             - sh
             - -c
             - |
               echo "Checking database migration status..."
               # Check if Job exists
               if ! kubectl get job piefed-db-init -n piefed-application >/dev/null 2>&1; then
                 echo "ERROR: Migration job does not exist!"
                 echo "Expected job/piefed-db-init in piefed-application namespace"
                 exit 1
               fi
               # Check if Job is complete
               COMPLETE_STATUS=$(kubectl get job piefed-db-init -n piefed-application -o jsonpath='{.status.conditions[?(@.type=="Complete")].status}' 2>/dev/null)
               if [ "$COMPLETE_STATUS" = "True" ]; then
                 echo "✓ Migrations already complete, proceeding..."
                 exit 0
               fi
               # Check if Job has failed
               FAILED_STATUS=$(kubectl get job piefed-db-init -n piefed-application -o jsonpath='{.status.conditions[?(@.type=="Failed")].status}' 2>/dev/null)
               if [ "$FAILED_STATUS" = "True" ]; then
                 echo "ERROR: Migration job has FAILED!"
                 echo "Job status:"
                 kubectl get job piefed-db-init -n piefed-application -o jsonpath='{.status.conditions[?(@.type=="Failed")]}' | jq .
                 echo ""
                 echo "Recent events:"
                 kubectl get events -n piefed-application --field-selector involvedObject.name=piefed-db-init --sort-by='.lastTimestamp' | tail -5
                 exit 1
               fi
               # Job exists but is still running, wait for it
               echo "Migration job running, waiting for completion..."
               kubectl wait --for=condition=complete --timeout=600s job/piefed-db-init -n piefed-application || {
                 echo "ERROR: Migration job failed or timed out!"
                 exit 1
               }
               echo "✓ Migrations complete, starting web pod..."
       containers:
         - name: piefed-web
-          image: <YOUR_REGISTRY_URL>/library/piefed-web:latest
+          image: registry.keyboardvagabond.com/library/piefed-web:latest
           imagePullPolicy: Always
           ports:
             - containerPort: 80
               name: http
           envFrom:
             - configMapRef:
                 name: piefed-config
             - secretRef:
                 name: piefed-secrets
           env:
             - name: PYTHONUNBUFFERED
               value: "1"
             - name: FLASK_DEBUG
               value: "0" # Keep production mode but enable better logging
             - name: WERKZEUG_DEBUG_PIN
               value: "off"
           resources:
             requests:
               cpu: 600m # Conservative reduction from 1000m considering 200-800x user growth
               memory: 1.5Gi # Conservative reduction from 2Gi considering scaling needs
             limits:
               cpu: 2000m # Keep original limits for burst capacity at scale
               memory: 4Gi # Keep original limits for growth
           volumeMounts:
             - name: app-storage
               mountPath: /app/app/media
               subPath: media
             - name: app-storage
               mountPath: /app/app/static/media
               subPath: static
             - name: cache-storage
               mountPath: /app/cache
           livenessProbe:
             httpGet:
               path: /health
               port: 80
             initialDelaySeconds: 60
             periodSeconds: 30
             timeoutSeconds: 10
           readinessProbe:
             httpGet:
               path: /health
               port: 80
             initialDelaySeconds: 30
             periodSeconds: 10
             timeoutSeconds: 5
       volumes:
         - name: app-storage
           persistentVolumeClaim:
             claimName: piefed-app-storage
         - name: cache-storage
           persistentVolumeClaim:
             claimName: piefed-cache-storage
 ---
 apiVersion: autoscaling/v2
 kind: HorizontalPodAutoscaler
@@ -135,15 +137,15 @@ spec:
   minReplicas: 2
   maxReplicas: 6
   metrics:
     - type: Resource
       resource:
         name: cpu
         target:
           type: AverageValue
           averageValue: 1400m # 70% of 2000m limit - allow better CPU utilization
     - type: Resource
       resource:
         name: memory
         target:
           type: Utilization
-          averageUtilization: 90
+          averageUtilization: 200 # 3GB of the 4 available
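
Note the semantics of the memory target that changed in the hunk above: in autoscaling/v2, averageUtilization is a percentage of the pod's request, not its limit. With the 1.5Gi request set earlier in this file, a target of 200 means the HPA scales out once average memory use passes roughly 3Gi, which is what the "3GB of the 4 available" comment refers to (4Gi being the limit). The CPU metric instead uses AverageValue, an absolute per-pod quantity. The two styles side by side, annotated with the values from this commit:

    metrics:
      - type: Resource
        resource:
          name: cpu
          target:
            type: AverageValue
            averageValue: 1400m      # absolute: scale out when average pod CPU exceeds 1400m
      - type: Resource
        resource:
          name: memory
          target:
            type: Utilization
            averageUtilization: 200  # relative to the 1.5Gi request: 200% ≈ 3Gi, under the 4Gi limit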

View File

@@ -20,116 +20,118 @@ spec:
         app.kubernetes.io/component: worker
     spec:
       serviceAccountName: piefed-init-checker
+      securityContext:
+        fsGroup: 1000 # piefed group - ensures volume mounts are writable
       imagePullSecrets:
         - name: harbor-pull-secret
       initContainers:
         - name: wait-for-migrations
           image: bitnami/kubectl@sha256:b407dcce69129c06fabab6c3eb35bf9a2d75a20d0d927b3f32dae961dba4270b
           command:
             - sh
             - -c
             - |
               echo "Checking database migration status..."
               # Check if Job exists
               if ! kubectl get job piefed-db-init -n piefed-application >/dev/null 2>&1; then
                 echo "ERROR: Migration job does not exist!"
                 echo "Expected job/piefed-db-init in piefed-application namespace"
                 exit 1
               fi
               # Check if Job is complete
               COMPLETE_STATUS=$(kubectl get job piefed-db-init -n piefed-application -o jsonpath='{.status.conditions[?(@.type=="Complete")].status}' 2>/dev/null)
               if [ "$COMPLETE_STATUS" = "True" ]; then
                 echo "✓ Migrations already complete, proceeding..."
                 exit 0
               fi
               # Check if Job has failed
               FAILED_STATUS=$(kubectl get job piefed-db-init -n piefed-application -o jsonpath='{.status.conditions[?(@.type=="Failed")].status}' 2>/dev/null)
               if [ "$FAILED_STATUS" = "True" ]; then
                 echo "ERROR: Migration job has FAILED!"
                 echo "Job status:"
                 kubectl get job piefed-db-init -n piefed-application -o jsonpath='{.status.conditions[?(@.type=="Failed")]}' | jq .
                 echo ""
                 echo "Recent events:"
                 kubectl get events -n piefed-application --field-selector involvedObject.name=piefed-db-init --sort-by='.lastTimestamp' | tail -5
                 exit 1
               fi
               # Job exists but is still running, wait for it
               echo "Migration job running, waiting for completion..."
               kubectl wait --for=condition=complete --timeout=600s job/piefed-db-init -n piefed-application || {
                 echo "ERROR: Migration job failed or timed out!"
                 exit 1
               }
               echo "✓ Migrations complete, starting worker pod..."
       containers:
         - name: piefed-worker
-          image: <YOUR_REGISTRY_URL>/library/piefed-worker:latest
+          image: registry.keyboardvagabond.com/library/piefed-worker:latest
           imagePullPolicy: Always
           envFrom:
             - configMapRef:
                 name: piefed-config
             - secretRef:
                 name: piefed-secrets
           env:
             - name: PYTHONUNBUFFERED
               value: "1"
             - name: FLASK_DEBUG
               value: "0" # Keep production mode but enable better logging
             - name: WERKZEUG_DEBUG_PIN
               value: "off"
             # Celery Worker Logging Configuration
             - name: CELERY_WORKER_HIJACK_ROOT_LOGGER
               value: "False"
             # Database connection pool overrides for worker (lower than web pods)
             - name: DB_POOL_SIZE
               value: "5" # Workers need fewer connections than web pods
             - name: DB_MAX_OVERFLOW
               value: "10" # Lower overflow for background tasks
           resources:
             requests:
               cpu: 500m
               memory: 1Gi
             limits:
               cpu: 2000m # Allow internal scaling to 5 workers
               memory: 3Gi # Increase for multiple workers
           volumeMounts:
             - name: app-storage
               mountPath: /app/app/media
               subPath: media
             - name: app-storage
               mountPath: /app/app/static/media
               subPath: static
             - name: cache-storage
               mountPath: /app/cache
           livenessProbe:
             exec:
               command:
                 - python
                 - -c
                 - "import os,redis,urllib.parse; u=urllib.parse.urlparse(os.environ['CELERY_BROKER_URL']); r=redis.Redis(host=u.hostname, port=u.port, password=u.password, db=int(u.path[1:]) if u.path else 0); r.ping()"
             initialDelaySeconds: 60
             periodSeconds: 60
             timeoutSeconds: 10
           readinessProbe:
             exec:
               command:
                 - python
                 - -c
                 - "import os,redis,urllib.parse; u=urllib.parse.urlparse(os.environ['CELERY_BROKER_URL']); r=redis.Redis(host=u.hostname, port=u.port, password=u.password, db=int(u.path[1:]) if u.path else 0); r.ping()"
             initialDelaySeconds: 30
             periodSeconds: 30
             timeoutSeconds: 5
       volumes:
         - name: app-storage
           persistentVolumeClaim:
             claimName: piefed-app-storage
         - name: cache-storage
           persistentVolumeClaim:
             claimName: piefed-cache-storage
 ---
 apiVersion: autoscaling/v2
 kind: HorizontalPodAutoscaler
@@ -144,15 +146,15 @@ spec:
   minReplicas: 1
   maxReplicas: 2
   metrics:
     - type: Resource
       resource:
         name: cpu
         target:
           type: Utilization
           averageUtilization: 375
     - type: Resource
       resource:
         name: memory
         target:
           type: Utilization
           averageUtilization: 250
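
One thing the commit leaves alone is the user the containers run as: fsGroup only fixes group ownership of mounted volumes, it does not change process identity. If the pods should also execute as the piefed user created in the Dockerfile, the same securityContext could be extended. A hypothetical sketch, not part of this commit:

    securityContext:
      runAsUser: 1000                       # hypothetical: match the piefed UID from the Dockerfile
      runAsGroup: 1000                      # hypothetical: match the piefed primary GID
      fsGroup: 1000                         # as added by this commit
      fsGroupChangePolicy: OnRootMismatch   # optional: avoid a full recursive chown on a large media volume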