# Use the official Python 3.9 slim image
FROM python:3.9-slim AS base

# Set environment variables
ENV PYTHONUNBUFFERED=1 \
    LANG=C.UTF-8 \
    HF_HOME="/tmp/huggingface_cache" \
    HUGGINGFACE_HUB_CACHE="/tmp/huggingface_cache"

# Set working directory
WORKDIR /app

# Copy requirements file
COPY requirements.txt .

# Install system dependencies
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*

# Install Python dependencies into a virtual environment
RUN python -m venv /app/venv && \
    /app/venv/bin/pip install --no-cache-dir --upgrade pip && \
    /app/venv/bin/pip install --no-cache-dir -r requirements.txt

# Ensure the Hugging Face cache directory exists and is writable
RUN mkdir -p $HF_HOME && chmod -R 777 $HF_HOME

# Copy application code
COPY main.py .

# Hugging Face token passed in as a build argument
# (note: ENV persists it in the final image's environment)
ARG HF_TOKEN
ENV HF_TOKEN=${HF_TOKEN}

# Pre-download models so they are baked into the image
# (a failed download is logged but does not abort the build)
RUN /app/venv/bin/python -c "from transformers import pipeline; \
    pipeline('sentiment-analysis', model='Ehrii/sentiment', use_auth_token='$HF_TOKEN')" \
    || echo 'Failed to download multilingual model'
RUN /app/venv/bin/python -c "from transformers import pipeline; \
    pipeline('sentiment-analysis', model='siebert/sentiment-roberta-large-english', use_auth_token='$HF_TOKEN')" \
    || echo 'Failed to download English model'

# Expose FastAPI port
EXPOSE 7860

# Run FastAPI server using the virtual environment
CMD ["/app/venv/bin/uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
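
# ---------------------------------------------------------------------------
# Example usage (a sketch; the image tag "sentiment-api" is an assumption, not
# part of this Dockerfile). The token is supplied at build time so the models
# can be fetched during `docker build`:
#
#   docker build --build-arg HF_TOKEN=<your_hf_token> -t sentiment-api .
#   docker run -p 7860:7860 sentiment-api
#
# The FastAPI server is then reachable at http://localhost:7860.
# ---------------------------------------------------------------------------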