@@ -101,10 +101,67 @@ mountType: virtiofs

Once inside the VM, install GPU/Vulkan support:

<p>
<details>
<summary>Click to expand script</summary>

```bash
#!/bin/bash
# SPDX-FileCopyrightText: Copyright The Lima Authors
# SPDX-License-Identifier: Apache-2.0
#
# Install Vulkan/Mesa (with the Venus-patched Mesa from COPR) and build
# llama.cpp with Vulkan support inside a Fedora aarch64 guest.
# Must be run as root (invokes dnf and installs into /usr).

set -eu -o pipefail

# Install required packages
dnf install -y dnf-plugins-core dnf-plugin-versionlock llvm18-libs

# Install Vulkan and Mesa base packages
dnf install -y \
  mesa-vulkan-drivers \
  vulkan-loader-devel \
  vulkan-headers \
  vulkan-tools \
  vulkan-loader \
  glslc

# Enable COPR repo with patched Mesa for Venus support
dnf copr enable -y slp/mesa-krunkit fedora-40-aarch64

# Downgrade to patched Mesa version from COPR
dnf downgrade -y mesa-vulkan-drivers.aarch64 \
  --repo=copr:copr.fedorainfracloud.org:slp:mesa-krunkit

# Lock Mesa version to prevent automatic upgrades
dnf versionlock add mesa-vulkan-drivers

# Clean up
dnf clean all

echo "Installing llama.cpp with Vulkan support..."
# Build and install llama.cpp with Vulkan support
dnf install -y git cmake clang curl-devel glslc vulkan-devel virglrenderer
(
  cd "$HOME"
  git clone https://github.com/ggml-org/llama.cpp
  (
    cd llama.cpp
    # Pin to a known-good commit for a reproducible build
    git reset --hard 97340b4c9924be86704dbf155e97c8319849ee19
    cmake -B build -DGGML_VULKAN=ON -DGGML_CCACHE=OFF -DCMAKE_INSTALL_PREFIX=/usr
    # FIXME: the build seems to fail on Apple M4 Max (and probably on other processors too).
    # Error:
    #   cc1: sorry, unimplemented: no support for 'sme' without 'sve2'
    cmake --build build --config Release -j8
    cmake --install build
  )
  # Source tree is no longer needed once installed; '--' guards the path.
  rm -rf -- llama.cpp
)

echo "Successfully installed llama.cpp with Vulkan support. Use 'llama-cli' app with .gguf models."
```

</details>
</p>

The script will prompt to build and install `llama.cpp` with Venus support from source.

After installation, run:
0 commit comments