redaction (#1)

Add the redacted source file for demo purposes

Reviewed-on: https://source.michaeldileo.org/michael_dileo/Keybard-Vagabond-Demo/pulls/1
Co-authored-by: Michael DiLeo <michael_dileo@proton.me>
Co-committed-by: Michael DiLeo <michael_dileo@proton.me>
This commit was merged in pull request #1.
Committed by michael_dileo on 2025-12-24 13:40:47 +00:00
commit 7327d77dcd (parent 612235d52b)
333 changed files with 39286 additions and 1 deletion


@@ -0,0 +1,27 @@
FROM piefed-base AS piefed-worker
# Install additional packages needed for worker container
RUN apk add --no-cache redis
# Worker-specific Python configuration for background processing
RUN echo "import sys" > /app/worker_config.py && \
echo "sys.path.append('/app')" >> /app/worker_config.py
# Copy worker-specific configuration files
COPY supervisord-worker.conf /etc/supervisor/conf.d/supervisord.conf
COPY entrypoint-worker.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
# Create worker directories and set permissions
RUN mkdir -p /var/log/supervisor /var/log/celery \
    && chown -R piefed:piefed /var/log/celery
# Health check for worker container (check celery status)
HEALTHCHECK --interval=60s --timeout=10s --start-period=60s --retries=3 \
    CMD su-exec piefed celery -A celery_worker_docker.celery inspect ping || exit 1
# Run as root to manage processes
USER root
ENTRYPOINT ["/entrypoint.sh"]
CMD ["supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]


@@ -0,0 +1,78 @@
#!/bin/sh
set -e
# Source common functions
. /usr/local/bin/entrypoint-common.sh
log "Starting PieFed worker container..."
# Run common startup sequence (without migrations)
export PIEFED_INIT_CONTAINER=false
common_startup
# Worker-specific initialization
log "Initializing worker container..."
# Apply dual logging configuration (file + stdout for OpenObserve)
log "Configuring dual logging for OpenObserve..."
# Setup dual logging (file + stdout) directly
python -c "
import logging
import sys
def setup_dual_logging():
'''Add stdout handlers to existing loggers without disrupting file logging'''
# Create a shared console handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(logging.Formatter(
'%(asctime)s [%(name)s] %(levelname)s: %(message)s'
))
# Add console handler to key loggers (in addition to their existing file handlers)
loggers_to_enhance = [
'flask.app', # Flask application logger
'werkzeug', # Web server logger
'celery', # Celery worker logger
'celery.task', # Celery task logger
'celery.worker', # Celery worker logger
'' # Root logger
]
for logger_name in loggers_to_enhance:
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
# Check if this logger already has a stdout handler
has_stdout_handler = any(
isinstance(h, logging.StreamHandler) and h.stream == sys.stdout
for h in logger.handlers
)
if not has_stdout_handler:
logger.addHandler(console_handler)
print('Dual logging configured: file + stdout for OpenObserve')
# Call the function
setup_dual_logging()
"
# Test Redis connection specifically
log "Testing Redis connection for Celery..."
python -c "
import redis
import os
r = redis.Redis(
host=os.environ.get('REDIS_HOST', 'redis'),
port=int(os.environ.get('REDIS_PORT', 6379)),
password=os.environ.get('REDIS_PASSWORD')
)
r.ping()
print('Redis connection successful')
"
# Start worker services via supervisor
log "Starting worker services (celery worker + beat)..."
exec "$@"


@@ -0,0 +1,29 @@
[supervisord]
nodaemon=true
user=root
logfile=/dev/stdout
logfile_maxbytes=0
pidfile=/var/run/supervisord.pid
silent=false

[program:celery-worker]
command=celery -A celery_worker_docker.celery worker --autoscale=5,1 --queues=celery,background,send --loglevel=info --task-events
user=piefed
directory=/app
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=true
priority=100
startsecs=10
stopasgroup=true
killasgroup=true
environment=FLASK_APP="pyfedi.py",CELERY_HIJACK_ROOT_LOGGER="false",CELERY_SEND_TASK_EVENTS="true",CELERY_TASK_TRACK_STARTED="true"

# Note: PieFed appears to use cron jobs instead of celery beat for scheduling
# The cron jobs are handled via Kubernetes CronJob resources
[group:piefed-worker]
programs=celery-worker
priority=999
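
The --queues=celery,background,send flag pins this worker to three named queues, and --autoscale=5,1 lets the prefork pool shrink to one process when idle and grow to five under load. Whether a running worker actually consumes those queues can be checked with celery's inspect commands; a sketch, assuming the container is named piefed-worker (the name is not fixed anywhere in this diff):

# List the queues each worker process is consuming; expect celery, background and send
docker exec piefed-worker su-exec piefed celery -A celery_worker_docker.celery inspect active_queues
# Dump per-worker statistics, including the current pool size chosen by autoscale
docker exec piefed-worker su-exec piefed celery -A celery_worker_docker.celery inspect stats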