Merge pull request #724 from cgruver/add-intel-gpu
Add logic to build intel-gpu image to build_llama_and_whisper.sh
rhatdan authored Feb 4, 2025
2 parents 07aba4b + 12d779d commit 341f962
Showing 3 changed files with 70 additions and 21 deletions.
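With this change, the intel-gpu Containerfile delegates the llama.cpp build to the shared build_llama_and_whisper.sh script instead of inlining the cmake steps. As a rough sketch (the tag name is illustrative; the COPY paths in the diff imply that container-images/ is the build context), the image could be built locally with:

    cd container-images
    podman build -t ramalama-intel-gpu -f intel-gpu/Containerfile .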
32 changes: 12 additions & 20 deletions container-images/intel-gpu/Containerfile
@@ -1,32 +1,24 @@
 FROM quay.io/fedora/fedora:41 as builder
 
 COPY intel-gpu/oneAPI.repo /etc/yum.repos.d/
+COPY scripts/build_llama_and_whisper.sh /
 
-RUN dnf install -y intel-opencl g++ cmake git tar libcurl-devel intel-oneapi-mkl-sycl-devel intel-oneapi-dnnl-devel intel-oneapi-compiler-dpcpp-cpp ; \
-    git clone https://github.com/ggerganov/llama.cpp.git -b b4523 ; \
-    cd llama.cpp ; \
-    mkdir -p build ; \
-    cd build ; \
-    source /opt/intel/oneapi/setvars.sh ; \
-    cmake .. -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON -DGGML_CCACHE=OFF -DGGML_NATIVE=OFF ; \
-    cmake --build . --config Release -j -v ; \
-    cmake --install . --prefix /llama-cpp
+RUN chmod +x /build_llama_and_whisper.sh ; \
+    /build_llama_and_whisper.sh intel-gpu
 
 FROM quay.io/fedora/fedora:41
 
 ENV LD_LIBRARY_PATH="/usr/local/lib64:/usr/local/lib/:/opt/intel/oneapi/mkl/2025.0/lib:/opt/intel/oneapi/compiler/2025.0/opt/compiler/lib:/opt/intel/oneapi/compiler/2025.0/lib/clang/19/lib:/opt/intel/oneapi/compiler/2025.0/lib:/opt/intel/oneapi/umf/0.9/lib:/opt/intel/oneapi/tbb/2022.0/lib:/opt/intel/oneapi/tcm/1.2/lib:/opt/intel/oneapi/redist/opt/compiler/lib:/opt/intel/oneapi/redist/lib/clang/19/lib:/opt/intel/oneapi/redist/lib"
 
-COPY --from=builder /llama-cpp/bin/ /usr/local/bin/
-COPY --from=builder /llama-cpp/lib/ /usr/local/lib/
-COPY --from=builder /llama-cpp/lib64/ /usr/local/lib64/
-COPY --from=builder /llama-cpp/include/ /usr/local/include/
+COPY --from=builder /tmp/install/ /usr/
 COPY intel-gpu/oneAPI.repo /etc/yum.repos.d/
+COPY --chown=0:0 intel-gpu/entrypoint.sh /
 
-RUN dnf install -y intel-opencl libcurl lspci clinfo intel-oneapi-runtime-compilers intel-oneapi-mkl-core intel-oneapi-mkl-sycl-blas intel-oneapi-runtime-dnnl ; \
+RUN dnf install -y procps-ng python3 python3-pip python3-devel intel-level-zero oneapi-level-zero intel-compute-runtime libcurl lspci clinfo intel-oneapi-runtime-compilers intel-oneapi-mkl-core intel-oneapi-mkl-sycl-blas intel-oneapi-runtime-dnnl ; \
     chown 0:0 /etc/passwd ; \
     chown 0:0 /etc/group ; \
-    chmod g=u /etc/passwd /etc/group ; \
-    useradd -u 1000 -g render -G video -s /bin/bash -d /home/llama-user llama-user
+    chmod g=u /etc/passwd /etc/group /home ; \
+    chmod +x /entrypoint.sh
 
+USER 10000
 
-USER 1000
 WORKDIR /home/llama-user
+ENTRYPOINT ["/entrypoint.sh"]
 CMD [ "tail", "-f", "/dev/null" ]
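The runtime stage now starts as the arbitrary non-root UID 10000 and defers user setup to entrypoint.sh. A hypothetical smoke test of the finished image (flags and tag are illustrative; assumes an Intel GPU exposed through /dev/dri) might be:

    podman run --rm -it --device /dev/dri ramalama-intel-gpu clinfo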
46 changes: 46 additions & 0 deletions container-images/intel-gpu/entrypoint.sh
@@ -0,0 +1,46 @@
#!/usr/bin/env bash

if [ -z "${HOME}" ]
then
  export HOME=/home/llama-user
fi

# Create Home directory
if [ ! -d "${HOME}" ]
then
  mkdir -p "${HOME}"
fi

# Create a passwd entry for the current UID and add it to the render/video groups
if ! whoami &> /dev/null
then
  if [ -w /etc/passwd ] && [ -w /etc/group ]
  then
    echo "${USER_NAME:-llama-user}:x:$(id -u):0:${USER_NAME:-llama-user} user:${HOME}:/bin/bash" >> /etc/passwd
    echo "${USER_NAME:-llama-user}:x:$(id -u):" >> /etc/group
    render_group="$(grep 'render:x' /etc/group)"
    video_group="$(grep 'video:x' /etc/group)"
    render_group_new="${render_group}${USER_NAME:-llama-user}"
    video_group_new="${video_group}${USER_NAME:-llama-user}"
    sed "s|${render_group}|${render_group_new}|g" /etc/group > /tmp/group
    cat /tmp/group > /etc/group
    sed "s|${video_group}|${video_group_new}|g" /etc/group > /tmp/group
    cat /tmp/group > /etc/group
  fi
fi

# Configure Z shell
if [ ! -f "${HOME}/.zshrc" ]
then
  echo "source /opt/intel/oneapi/setvars.sh" > "${HOME}/.zshrc"
fi

# Configure Bash shell
if [ ! -f "${HOME}/.bashrc" ]
then
  echo "source /opt/intel/oneapi/setvars.sh" > "${HOME}/.bashrc"
fi

source /opt/intel/oneapi/setvars.sh

exec "$@"
13 changes: 12 additions & 1 deletion container-images/scripts/build_llama_and_whisper.sh
@@ -6,6 +6,9 @@ dnf_install() {
"procps-ng" "git" "dnf-plugins-core" "libcurl-devel")
local vulkan_rpms=("vulkan-headers" "vulkan-loader-devel" "vulkan-tools" \
"spirv-tools" "glslc" "glslang")
local intel_rpms=("intel-oneapi-mkl-sycl-devel" "intel-oneapi-dnnl-devel" \
"intel-oneapi-compiler-dpcpp-cpp" "intel-level-zero" \
"oneapi-level-zero" "oneapi-level-zero-devel" "intel-compute-runtime")

# All the UBI-based ones
if [ "$containerfile" = "ramalama" ] || [ "$containerfile" = "rocm" ] || \
@@ -37,6 +40,11 @@ dnf_install() {
     # shellcheck disable=SC1091
     . /opt/rh/gcc-toolset-12/enable
   fi
+
+  if [ "$containerfile" = "intel-gpu" ]; then
+    dnf install -y "${rpm_list[@]}" "${intel_rpms[@]}"
+    source /opt/intel/oneapi/setvars.sh
+  fi
 }
 
 cmake_check_warnings() {
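Note that the intel-oneapi-* packages resolve only if Intel's oneAPI yum repository is configured; the Containerfile copies intel-gpu/oneAPI.repo into the builder image before this script runs. That repo file is not shown in this diff, but Intel's documented definition looks roughly like this (contents illustrative, not necessarily the file shipped in this PR):

    [oneAPI]
    name=Intel oneAPI repository
    baseurl=https://yum.repos.intel.com/oneapi
    enabled=1
    gpgcheck=1
    repo_gpgcheck=1
    gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB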
@@ -51,7 +59,7 @@ cmake_steps() {
 }
 
 set_install_prefix() {
-  if [ "$containerfile" = "cuda" ]; then
+  if [ "$containerfile" = "cuda" ] || [ "$containerfile" = "intel-gpu" ]; then
     install_prefix="/tmp/install"
   else
     install_prefix="/usr"
Expand All @@ -70,6 +78,9 @@ configure_common_flags() {
     vulkan | asahi)
       common_flags+=("-DGGML_VULKAN=1")
       ;;
+    intel-gpu)
+      common_flags+=("-DGGML_SYCL=ON" "-DCMAKE_C_COMPILER=icx" "-DCMAKE_CXX_COMPILER=icpx")
+      ;;
   esac
 }
 
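Taken together with set_install_prefix, the intel-gpu path should end up configuring llama.cpp roughly like the inline build that was removed from the Containerfile (a sketch only; the remaining common flags come from elsewhere in this script):

    source /opt/intel/oneapi/setvars.sh
    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
    cmake --build build --config Release -j
    cmake --install build --prefix /tmp/install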
