Mirror of https://github.com/ollama/ollama.git, synced 2026-05-05 23:53:43 -05:00.
* metal: harden for ggml initialization failures.
  `ggml_metal_device_init` performs a probe to verify the tensor API compiles. On some systems this passes even though kernel coverage isn't complete, which results in a later crash when compiling the real kernels. This change adds a single retry — if any of the error strings match this failure mode, the tensor API is disabled. It also hardens an error case in the Go `initDevices` to detect device initialization failures and panic instead of crashing later on a nil array entry. Fixes #15734.
* review comments
* review comments
32 lines
727 B
Go
package llm
|
|
|
|
import (
|
|
"context"
|
|
"strings"
|
|
"testing"
|
|
)
|
|
|
|
func TestWaitUntilRunningUsesStatusMessageWhenDoneErrIsNil(t *testing.T) {
|
|
done := make(chan struct{})
|
|
close(done)
|
|
|
|
status := &StatusWriter{}
|
|
status.SetLastError("llama_init_from_model: failed to initialize the context: failed to initialize Metal backend")
|
|
|
|
s := &llmServer{
|
|
done: done,
|
|
status: status,
|
|
}
|
|
|
|
err := s.WaitUntilRunning(context.Background())
|
|
if err == nil {
|
|
t.Fatal("expected error")
|
|
}
|
|
if strings.Contains(err.Error(), "%!w(<nil>)") {
|
|
t.Fatalf("unexpected wrapped nil error: %q", err)
|
|
}
|
|
if !strings.Contains(err.Error(), s.status.LastError()) {
|
|
t.Fatalf("error %q does not include status message %q", err, s.status.LastError())
|
|
}
|
|
}
|