Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .dockerignore
Original file line number Diff line number Diff line change
Expand Up @@ -18,4 +18,5 @@
**/secrets.dev.yaml
**/values.dev.yaml
**/.toolstarget
**/node_modules
**/node_modules
whisper.cpp
86 changes: 42 additions & 44 deletions pythonrpcserver.Dockerfile
Original file line number Diff line number Diff line change
@@ -1,47 +1,45 @@
# # ------------------------------
# # Stage 1: Build Whisper.cpp
# # ------------------------------
# NOTE(review): legacy Makefile-based build of whisper.cpp, superseded by the
# cmake-based stage below. Pinned to commit 021eef1 for reproducibility.
FROM --platform=linux/amd64 python:3.8.15-slim-buster AS whisperbuild
RUN apt-get update && \
apt-get install -y curl gcc g++ make libglib2.0-0 libsm6 libxext6 libxrender-dev ffmpeg git

WORKDIR /whisper.cpp
# RUN git clone https://github.com/ggerganov/whisper.cpp . && make
RUN git clone https://github.com/ggerganov/whisper.cpp . && \
git checkout 021eef1 && \
make
# Pre-fetch the ggml models so the runtime stage can copy them without network access.
RUN bash ./models/download-ggml-model.sh base.en
RUN bash ./models/download-ggml-model.sh tiny.en
RUN bash ./models/download-ggml-model.sh large-v3

# ------------------------------
# Stage 1: Build Whisper.cpp
# ------------------------------
FROM --platform=linux/amd64 python:3.8.15-slim-buster AS whisperbuild

# Build toolchain + ffmpeg. wget is folded into the single install command (it
# was previously a redundant second `apt-get install`), ca-certificates is made
# explicit (needed for the https wget/clone below once recommends are dropped),
# and the apt lists are removed in the same layer (hadolint DL3009/DL3015).
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        ca-certificates curl ffmpeg g++ gcc git \
        libglib2.0-0 libsm6 libxext6 libxrender-dev make wget && \
    rm -rf /var/lib/apt/lists/*

# whisper.cpp's cmake build needs a newer CMake than buster ships; install the
# pinned Kitware release. NOTE(review): the installer is fetched without a
# checksum — consider verifying against the sha256 published on the release page.
RUN wget https://github.com/Kitware/CMake/releases/download/v3.27.7/cmake-3.27.7-linux-x86_64.sh -O /tmp/cmake-install.sh && \
    chmod +x /tmp/cmake-install.sh && \
    /tmp/cmake-install.sh --skip-license --prefix=/usr/local && \
    rm /tmp/cmake-install.sh

WORKDIR /whisper.cpp
# Static build of the examples so the whisper-cli binary can be copied into the
# runtime stage on its own (no shared libs to carry along).
# NOTE(review): this clone is unpinned and builds HEAD — the previous version
# pinned commit 021eef1; pin a tag/commit here to keep builds reproducible.
RUN git clone https://github.com/ggml-org/whisper.cpp . && \
    cmake -B build -DWHISPER_BUILD_EXAMPLES=ON -DBUILD_SHARED_LIBS=OFF && \
    cmake --build build --parallel $(nproc)

# Pre-fetch the ggml models in separate layers so one failed download does not
# invalidate the compile layer above.
RUN bash ./models/download-ggml-model.sh base.en
RUN bash ./models/download-ggml-model.sh tiny.en
RUN bash ./models/download-ggml-model.sh large-v3

# ------------------------------
# Stage 2: Setup Python RPC Server
# ------------------------------
FROM --platform=linux/amd64 python:3.8.15-slim-buster AS rpcserver
RUN apt-get update && \
apt-get install -y curl gcc g++ make libglib2.0-0 libsm6 libxext6 libxrender-dev ffmpeg

# Limit OpenMP to a single thread per whisper process.
ENV OMP_THREAD_LIMIT=1
# The legacy Makefile build produced the binary at ./main.
COPY --from=whisperbuild /whisper.cpp/main /usr/local/bin/whisper
COPY --from=whisperbuild /whisper.cpp/models /PythonRpcServer/models
WORKDIR /PythonRpcServer

# Don't copy any py files here, so that we don't need to re-run whisper
COPY ./PythonRpcServer/transcribe_hellohellohello.wav .
# The output of this whisper run is used when we set MOCK_RECOGNITION=MOCK for quick testing
RUN whisper -ojf -f transcribe_hellohellohello.wav

COPY ./PythonRpcServer/requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade pip && \
pip install --no-cache-dir -r requirements.txt

# Generate the gRPC Python stubs from the shared proto definition.
COPY ct.proto ct.proto
RUN python -m grpc_tools.protoc -I . --python_out=./ --grpc_python_out=./ ct.proto

COPY ./PythonRpcServer .


# Run at low CPU/IO priority so transcription doesn't starve co-located services.
CMD [ "nice", "-n", "18", "ionice", "-c", "2", "-n", "6", "python3", "-u", "/PythonRpcServer/server.py" ]



# ------------------------------
# Stage 2: Setup Python RPC Server
# ------------------------------
FROM --platform=linux/amd64 python:3.8.15-slim-buster AS rpcserver

# Runtime packages (ffmpeg for audio handling; gcc/g++/make presumably for pip
# packages that compile from source — TODO confirm against requirements.txt).
# --no-install-recommends plus the same-layer apt-list cleanup keeps these out
# of the final image (hadolint DL3009/DL3015); ca-certificates made explicit
# since it is only a Recommends of curl's libs.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        ca-certificates curl ffmpeg g++ gcc \
        libglib2.0-0 libsm6 libxext6 libxrender-dev make && \
    rm -rf /var/lib/apt/lists/*

# Limit OpenMP to a single thread per whisper process.
ENV OMP_THREAD_LIMIT=1
# The cmake build puts the CLI at build/bin/whisper-cli (the legacy Makefile
# build produced ./main); install it under the old `whisper` name.
COPY --from=whisperbuild /whisper.cpp/build/bin/whisper-cli /usr/local/bin/whisper
COPY --from=whisperbuild /whisper.cpp/models /PythonRpcServer/models
WORKDIR /PythonRpcServer

# Don't copy any .py files yet, so source edits don't re-run the whisper step below.
COPY ./PythonRpcServer/transcribe_hellohellohello.wav .
# The output of this whisper run is used when MOCK_RECOGNITION=MOCK is set for quick testing.
RUN whisper -ojf -f transcribe_hellohellohello.wav

COPY ./PythonRpcServer/requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Generate the gRPC Python stubs from the shared proto definition.
COPY ct.proto ct.proto
RUN python -m grpc_tools.protoc -I . --python_out=./ --grpc_python_out=./ ct.proto

COPY ./PythonRpcServer .

# NOTE(review): the server runs as root — consider adding a non-root USER once
# file ownership/write needs under /PythonRpcServer are verified.
# Run at low CPU/IO priority so transcription doesn't starve co-located services.
CMD [ "nice", "-n", "18", "ionice", "-c", "2", "-n", "6", "python3", "-u", "/PythonRpcServer/server.py" ]
47 changes: 47 additions & 0 deletions pythonrpcserver_legacy.Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
# # ------------------------------
# # Stage 1: Build Whisper.cpp
# # ------------------------------
# NOTE(review): frozen snapshot of the legacy Makefile-based build, kept as a
# separate Dockerfile for fallback; intentionally unchanged from the original.
FROM --platform=linux/amd64 python:3.8.15-slim-buster AS whisperbuild
RUN apt-get update && \
apt-get install -y curl gcc g++ make libglib2.0-0 libsm6 libxext6 libxrender-dev ffmpeg git

WORKDIR /whisper.cpp
# RUN git clone https://github.com/ggerganov/whisper.cpp . && make
RUN git clone https://github.com/ggerganov/whisper.cpp . && \
git checkout 021eef1 && \
make
# Pre-fetch the ggml models so the runtime stage can copy them without network access.
RUN bash ./models/download-ggml-model.sh base.en
RUN bash ./models/download-ggml-model.sh tiny.en
RUN bash ./models/download-ggml-model.sh large-v3

# ------------------------------
# Stage 2: Setup Python RPC Server
# ------------------------------
FROM --platform=linux/amd64 python:3.8.15-slim-buster AS rpcserver
RUN apt-get update && \
apt-get install -y curl gcc g++ make libglib2.0-0 libsm6 libxext6 libxrender-dev ffmpeg

# Limit OpenMP to a single thread per whisper process.
ENV OMP_THREAD_LIMIT=1
# The Makefile build produces the binary at ./main.
COPY --from=whisperbuild /whisper.cpp/main /usr/local/bin/whisper
COPY --from=whisperbuild /whisper.cpp/models /PythonRpcServer/models
WORKDIR /PythonRpcServer

# Don't copy any py files here, so that we don't need to re-run whisper
COPY ./PythonRpcServer/transcribe_hellohellohello.wav .
# The output of this whisper run is used when we set MOCK_RECOGNITION=MOCK for quick testing
RUN whisper -ojf -f transcribe_hellohellohello.wav

COPY ./PythonRpcServer/requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade pip && \
pip install --no-cache-dir -r requirements.txt

# Generate the gRPC Python stubs from the shared proto definition.
COPY ct.proto ct.proto
RUN python -m grpc_tools.protoc -I . --python_out=./ --grpc_python_out=./ ct.proto

COPY ./PythonRpcServer .


# Run at low CPU/IO priority so transcription doesn't starve co-located services.
CMD [ "nice", "-n", "18", "ionice", "-c", "2", "-n", "6", "python3", "-u", "/PythonRpcServer/server.py" ]



Loading