# llm_host / Dockerfile
# Author: Bahodir Nematjonov
# (provenance from hosting UI: commit efb3b66, "docker updated", 913 bytes)
# syntax=docker/dockerfile:1

# Runtime base. The full (non-slim) Python 3.9 image is kept because the
# Ollama installer below requires curl, which the -slim variants do not ship.
FROM python:3.9

# Install Python dependencies in their own layer, copying only
# requirements.txt first so this layer stays cached until the
# dependency list itself changes (not on every source edit).
WORKDIR /code
COPY ./requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Fail the build if any command in a RUN pipeline fails; the default
# `/bin/sh -c` would let a failed curl be masked by the trailing `sh`.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Install Ollama (needed for LLM response generation).
# NOTE(review): piping a remote script into sh is unpinned and unverified —
# consider pinning a release version and verifying its checksum instead.
RUN curl -fsSL https://ollama.com/install.sh | sh

# Create and switch to a non-root user (UID 1000) for security; all
# root-requiring steps (pip as system install, Ollama install) are done above.
RUN useradd -m -u 1000 user
USER user

# Runtime environment for the non-root user; ~/.local/bin on PATH picks up
# any user-level Python entry points.
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Application lives under the user's home directory.
WORKDIR $HOME/app

# Copy project files owned by the non-root user (avoids a separate
# chown layer). A .dockerignore should exclude .git, caches, and .env files.
COPY --chown=user . $HOME/app

# Documentation only (does not publish the port): FastAPI listens on 7860.
EXPOSE 7860

# Exec-form CMD so uvicorn runs as PID 1 and receives SIGTERM from
# `docker stop` directly (shell form would interpose /bin/sh).
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]