# Base image
FROM ghcr.io/ggerganov/llama.cpp:full-cuda
ENV DEBIAN_FRONTEND=noninteractive
# Update and install necessary dependencies
RUN apt-get update && \
    apt-get install --no-install-recommends -y \
        build-essential \
        python3 \
        python3-pip \
        wget \
        curl \
        git \
        cmake \
        zlib1g-dev \
        libblas-dev && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
# Set up CUDA environment variables (the CUDA-enabled base image likely sets these already, but it's good to be explicit)
ENV PATH="/usr/local/cuda/bin:$PATH" \
    LD_LIBRARY_PATH="/usr/local/cuda/lib64:$LD_LIBRARY_PATH" \
    CUDA_HOME="/usr/local/cuda"
WORKDIR /app
# Download the LLaVA 1.6 GGUF model and the multimodal projector (mmproj) from Hugging Face
RUN wget https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/llava-v1.6-mistral-7b.Q5_K_M.gguf && \
    wget https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/mmproj-model-f16.gguf
# List the working directory contents (debugging aid; safe to remove)
RUN ls -al

# Build llama.cpp with CUDA (cuBLAS) support; the source tree already ships in the full-cuda base image, so no clone is needed
RUN make LLAMA_CUBLAS=1
# Expose the port
EXPOSE 8080
# Start the server with the model and multimodal projector; these arguments are passed to the base image's entrypoint
CMD ["--server", "--model", "llava-v1.6-mistral-7b.Q5_K_M.gguf", "--mmproj", "mmproj-model-f16.gguf", "--threads", "6", "--host", "0.0.0.0", "-ngl", "33"]