forked from ollama/ollama
-
Notifications
You must be signed in to change notification settings - Fork 23
Closed
Labels
bug: Something isn't working
Description
What is the issue?
Hi, first of all - great work for implementing vulkan support! If finally made using APUs a bliss.
I tried merging the latest ollama upstream (0.6.0) that implements Gemma support - and noticed that the Gemma models fail to run.
All other models are working fine, and gemma3:12 inference works on CUDA or CPU.
ggml_vulkan: Compiling shaders
time=2025-03-12T14:02:23.756Z level=INFO source=server.go:619 msg="waiting for server to become available" status="llm server loading model"
............................................Done!
time=2025-03-12T14:03:21.599Z level=INFO source=ggml.go:289 msg="model weights" buffer=CPU size="787.5 MiB"
time=2025-03-12T14:03:21.599Z level=INFO source=ggml.go:289 msg="model weights" buffer=Vulkan0 size="7.6 GiB"
time=2025-03-12T14:03:22.319Z level=INFO source=server.go:619 msg="waiting for server to become available" status="llm server not responding"
time=2025-03-12T14:03:23.558Z level=INFO source=server.go:619 msg="waiting for server to become available" status="llm server loading model"
SIGSEGV: segmentation violation
PC=0x74186ac50cf9 m=27 sigcode=1 addr=0x18
signal arrived during cgo execution
goroutine 396 gp=0xc00316f340 m=27 mp=0xc002f1a808 [syscall]:
runtime.cgocall(0x5d72d07da5c0, 0xc00316a658)
runtime/cgocall.go:167 +0x4b fp=0xc00316a630 sp=0xc00316a5f8 pc=0x5d72cf9a660b
github.com/ollama/ollama/ml/backend/ggml._Cfunc_ggml_backend_tensor_set(0x741864151a80, 0xc002f36600, 0x0, 0x1200)
_cgo_gotypes.go:573 +0x45 fp=0xc00316a658 sp=0xc00316a630 pc=0x5d72cfd91885
github.com/ollama/ollama/ml/backend/ggml.New.func18.1(...)
github.com/ollama/ollama/ml/backend/ggml/ggml.go:325
github.com/ollama/ollama/ml/backend/ggml.New.func18()
github.com/ollama/ollama/ml/backend/ggml/ggml.go:325 +0x417 fp=0xc00316a778 sp=0xc00316a658 pc=0x5d72cfd988b7
golang.org/x/sync/errgroup.(*Group).Go.func1()
golang.org/x/[email protected]/errgroup/errgroup.go:78 +0x50 fp=0xc00316a7e0 sp=0xc00316a778 pc=0x5d72cfd8fed0
runtime.goexit({})
runtime/asm_amd64.s:1700 +0x1 fp=0xc00316a7e8 sp=0xc00316a7e0 pc=0x5d72cf9b1021
created by golang.org/x/sync/errgroup.(*Group).Go in goroutine 36
golang.org/x/[email protected]/errgroup/errgroup.go:75 +0x93
goroutine 1 gp=0xc000002380 m=nil [IO wait]:
runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?)
runtime/proc.go:435 +0xce fp=0xc0004e1648 sp=0xc0004e1628 pc=0x5d72cf9a98ee
runtime.netpollblock(0xc0004e1698?, 0xcf943226?, 0x72?)
runtime/netpoll.go:575 +0xf7 fp=0xc0004e1680 sp=0xc0004e1648 pc=0x5d72cf96e6f7
internal/poll.runtime_pollWait(0x74187cfc6eb0, 0x72)
runtime/netpoll.go:351 +0x85 fp=0xc0004e16a0 sp=0xc0004e1680 pc=0x5d72cf9a8b05
internal/poll.(*pollDesc).wait(0xc000688c00?, 0x5d72cf94bcb1?, 0x0)
internal/poll/fd_poll_runtime.go:84 +0x27 fp=0xc0004e16c8 sp=0xc0004e16a0 pc=0x5d72cfa2ff87
internal/poll.(*pollDesc).waitRead(...)
internal/poll/fd_poll_runtime.go:89
internal/poll.(*FD).Accept(0xc000688c00)
internal/poll/fd_unix.go:620 +0x295 fp=0xc0004e1770 sp=0xc0004e16c8 pc=0x5d72cfa35355
net.(*netFD).accept(0xc000688c00)
net/fd_unix.go:172 +0x29 fp=0xc0004e1828 sp=0xc0004e1770 pc=0x5d72cfaa8169
net.(*TCPListener).accept(0xc00004e040)
net/tcpsock_posix.go:159 +0x1b fp=0xc0004e1878 sp=0xc0004e1828 pc=0x5d72cfabdb1b
net.(*TCPListener).Accept(0xc00004e040)
net/tcpsock.go:380 +0x30 fp=0xc0004e18a8 sp=0xc0004e1878 pc=0x5d72cfabc9d0
net/http.(*onceCloseListener).Accept(0xc000130360?)
<autogenerated>:1 +0x24 fp=0xc0004e18c0 sp=0xc0004e18a8 pc=0x5d72cfcd3b44
net/http.(*Server).Serve(0xc0002cc000, {0x5d72d0c84578, 0xc00004e040})
net/http/server.go:3424 +0x30c fp=0xc0004e19f0 sp=0xc0004e18c0 pc=0x5d72cfcab40c
github.com/ollama/ollama/runner/ollamarunner.Execute({0xc000034150, 0xe, 0xf})
github.com/ollama/ollama/runner/ollamarunner/runner.go:939 +0xe6a fp=0xc0004e1d08 sp=0xc0004e19f0 pc=0x5d72cfe3750a
github.com/ollama/ollama/runner.Execute({0xc000034130?, 0x0?, 0x0?})
github.com/ollama/ollama/runner/runner.go:20 +0xc9 fp=0xc0004e1d30 sp=0xc0004e1d08 pc=0x5d72cfe38069
github.com/ollama/ollama/cmd.NewCLI.func2(0xc0001f3200?, {0x5d72d07f5054?, 0x4?, 0x5d72d07f5058?})
github.com/ollama/ollama/cmd/cmd.go:1285 +0x45 fp=0xc0004e1d58 sp=0xc0004e1d30 pc=0x5d72d05aa185
github.com/spf13/cobra.(*Command).execute(0xc000737508, {0xc0004025a0, 0xf, 0xf})
github.com/spf13/[email protected]/command.go:940 +0x85c fp=0xc0004e1e78 sp=0xc0004e1d58 pc=0x5d72cfb212fc
github.com/spf13/cobra.(*Command).ExecuteC(0xc0004cc908)
github.com/spf13/[email protected]/command.go:1068 +0x3a5 fp=0xc0004e1f30 sp=0xc0004e1e78 pc=0x5d72cfb21b45
github.com/spf13/cobra.(*Command).Execute(...)
github.com/spf13/[email protected]/command.go:992
github.com/spf13/cobra.(*Command).ExecuteContext(...)
github.com/spf13/[email protected]/command.go:985
main.main()
github.com/ollama/ollama/main.go:12 +0x4d fp=0xc0004e1f50 sp=0xc0004e1f30 pc=0x5d72d05aa4ed
runtime.main()
runtime/proc.go:283 +0x29d fp=0xc0004e1fe0 sp=0xc0004e1f50 pc=0x5d72cf975cfd
runtime.goexit({})
runtime/asm_amd64.s:1700 +0x1 fp=0xc0004e1fe8 sp=0xc0004e1fe0 pc=0x5d72cf9b1021
[.....]
goroutine 1380 gp=0xc093cc6e00 m=404 mp=0xc0910aa808 [syscall]:
runtime.cgocall(0x5d72d07da5c0, 0xc093ccfe58)
runtime/cgocall.go:167 +0x4b fp=0xc093ccfe30 sp=0xc093ccfdf8 pc=0x5d72cf9a660b
github.com/ollama/ollama/ml/backend/ggml._Cfunc_ggml_backend_tensor_set(0x7418641a75e0, 0xc093cd8000, 0x0, 0x3c00)
_cgo_gotypes.go:573 +0x45 fp=0xc093ccfe58 sp=0xc093ccfe30 pc=0x5d72cfd91885
github.com/ollama/ollama/ml/backend/ggml.New.func18.1(...)
github.com/ollama/ollama/ml/backend/ggml/ggml.go:325
github.com/ollama/ollama/ml/backend/ggml.New.func18()
github.com/ollama/ollama/ml/backend/ggml/ggml.go:325 +0x417 fp=0xc093ccff78 sp=0xc093ccfe58 pc=0x5d72cfd988b7
golang.org/x/sync/errgroup.(*Group).Go.func1()
golang.org/x/[email protected]/errgroup/errgroup.go:78 +0x50 fp=0xc093ccffe0 sp=0xc093ccff78 pc=0x5d72cfd8fed0
runtime.goexit({})
runtime/asm_amd64.s:1700 +0x1 fp=0xc093ccffe8 sp=0xc093ccffe0 pc=0x5d72cf9b1021
created by golang.org/x/sync/errgroup.(*Group).Go in goroutine 36
golang.org/x/[email protected]/errgroup/errgroup.go:75 +0x93
rax 0x0
rbx 0x3d4ad980
rcx 0xfffffffe
rdx 0x741770000b80
rdi 0x741844ff8a70
rsi 0x0
rbp 0x741844ff8dc0
rsp 0x741844ff8cf0
r8 0x1
r9 0x1
r10 0x741844ff8b60
r11 0x0
r12 0x741844ff8d30
r13 0x1200
r14 0xc002f36600
r15 0x7418640c70c0
rip 0x74186ac50cf9
rflags 0x10206
cs 0x33
fs 0x0
gs 0x0
time=2025-03-12T14:03:24.595Z level=INFO source=server.go:619 msg="waiting for server to become available" status="llm server not responding"
time=2025-03-12T14:03:24.845Z level=ERROR source=sched.go:456 msg="error loading llama server" error="llama runner process has terminated: exit status 2"
OS
Linux
GPU
AMD
CPU
Intel
Ollama version
0.6.0
jhemmond
Metadata
Metadata
Assignees
Labels
bug: Something isn't working