Originally created by @jmorganca on GitHub (Oct 28, 2023).
Original GitHub issue: https://github.com/ollama/ollama/issues/941
Originally assigned to: @mxyng on GitHub.
While rare, `ollama pull` will sometimes result in a digest mismatch on download.

@yohskar commented on GitHub (Nov 2, 2023):
Sadly not so rare
@wndalci commented on GitHub (Nov 2, 2023):
Can confirm I get this error too.
Error: digest mismatch, file must be downloaded again: want sha256:22f7f8ef5f4c791c1b03d7eb414399294764d7cc82c7e94aa81a1feb80a983a2, got sha256:c05efac18bab97102ead2aeba0024d180264f658b5fcca629e9da6e462b69595

I retried the download and got a different hash again:

sha256:1a5e92f4ae4bc51dcdc8432154a8ba1ce1411e1c09ff22abde654c174375ea50

My device is an M1 Pro Apple Silicon MacBook and my internet is normally pretty stable. Also, not sure if this is normal, but my download speed becomes very slow towards the end of the download, around 200 kB/s, sometimes dipping under 100.
Hope this bug gets fixed soon, ollama does look very interesting 😁
@s-payyeri commented on GitHub (Nov 2, 2023):
Almost the exact setup as https://github.com/jmorganca/ollama/issues/941#issuecomment-1791264690, and I get the same error:
~ docker exec -it ollama ollama run llama2
pulling manifest
pulling 8c17c2ebb0ea... 100% |████████████████| (7.0/7.0 kB, 3.7 kB/s)
pulling 7c23fb36d801... 100% |████████████████| (4.8/4.8 kB, 1.9 kB/s)
pulling 2e0493f67d0c... 100% |█████████████████████| (59/59 B, 24 B/s)
pulling 2759286baa87... 100% |███████████████████| (105/105 B, 49 B/s)
pulling 5407e3188df9... 100% |██████████████████| (529/529 B, 216 B/s)
verifying sha256 digest
Error: digest mismatch, file must be downloaded again: want sha256:22f7f8ef5f4c791c1b03d7eb414399294764d7cc82c7e94aa81a1feb80a983a2, got sha256:262af94bcc6457d24f03b62ccb09598c406ee04d6d5a01246f44ea93ba20a022
@mxyng commented on GitHub (Nov 3, 2023):
There appears to be some instability with the backing file store. We're actively investigating
@johnmaguire commented on GitHub (Nov 4, 2023):
I have received this error approximately 3 times while running `ollama run llama2` in the past week. I've not been able to get it to work successfully.

Note that while the wanted hash is the same as others in this thread, the received hash is different.
macOS Sonoma
@karpathy commented on GitHub (Nov 4, 2023):
Same error, tried to re-download a few times but can't seem to get Llama 2 70B working on my Mac. But Llama 2 7B worked earlier.
@danja commented on GitHub (Nov 6, 2023):
Same problem with 7B+ models on home desktop (i7, 16GB, CPU-only). I was eventually able to get orca-mini 3B running (woo-hoo!).
What might be relevant is that I have a slow, sometimes unreliable net connection. I suspect the problem is fairly low-level: chunks not aligning properly around the inline retries/resumes, and/or after restarting, when it pulls part of the load from a cache somewhere. Around the net/http lib?
I do want this on my remote server, which has a datacentre-fast connection; I'll report back if that makes any difference.
@johnmaguire commented on GitHub (Nov 8, 2023):
FWIW, I have a 1 Gbps fiber connection that is highly reliable. However, the 7B server seems to be slow and unreliable. I did have to do multiple retries each time, IIRC. Despite the download completing, I was never able to get it working due to this error.
I was able to download and run the uncensored version without issue.
@Bortus-AI commented on GitHub (Nov 20, 2023):
I am getting the same issue with all wizardcoder models
I have 10 Gbps fiber, so I'm pretty sure it's not my internet. All other models download perfectly.
@Bortus-AI commented on GitHub (Nov 20, 2023):
Running rm -rf ~/.ollama/models/* and then doing ollama pull wizardcoder fixed the issue. But as you know, I now have to redownload all models. I believe a purge method is coming soon to fix this issue, correct?
@ramkrishna757575 commented on GitHub (Dec 10, 2023):
This is not rare...I'm continuously getting this error. Unable to use any model
@Bortus-AI commented on GitHub (Dec 12, 2023):
Just spun up two new servers and getting this error with every model. Anyone have a fix yet for this?
@ramkrishna757575 commented on GitHub (Dec 13, 2023):
I just went into the shell of the ollama Docker container, and from there I tried doing `ollama pull [MODEL]`. It failed 3-4 times, but eventually it worked.
This needs a proper fix as this gets frustrating quickly.
@Bortus-AI commented on GitHub (Dec 13, 2023):
Agreed. Took 9 tries to get just one model to download. Fighting with the rest now. Very frustrating
Gave up on orca-mini:13b after 12 tries.
@go-native commented on GitHub (Dec 19, 2023):
Getting the same error after 26 GB download. Any news on this?
@byteconcepts commented on GitHub (Dec 31, 2023):
Same here: Not one model downloads correctly any more.
Tried multiple models with multiple tries each: No success :-(
@byteconcepts commented on GitHub (Jan 1, 2024):
Sorry I have to comment again, but it is now almost impossible to download a model.
I repeatedly tried various models and their variants in the background over the whole day and didn't get a single correct download in the end.
Imho it's also pretty strange behavior that the downloader wants to check the checksum of the downloaded file when it should already know that the size of the file it downloaded is not the size the file should have.
I don't know how it's implemented, but this behavior seems strange to me. If the size of the downloaded file really is correct, then it's a real problem that the file must be corrupt. But then only sometimes, because some users reported getting the file with the correct checksum after a few tries.
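For illustration, the ordering being suggested here (a cheap size check before the expensive hash) might look like the following Go sketch. This is not ollama's actual code; the expected size and digest are assumed to come from the model manifest.

```go
// Illustrative sketch only, not ollama's implementation: fail fast on a size
// mismatch before spending time hashing gigabytes. wantSize and wantDigest
// are assumed to come from the model manifest.
package blobcheck

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

func verifyDownload(path string, wantSize int64, wantDigest string) error {
	fi, err := os.Stat(path)
	if err != nil {
		return err
	}
	// A short or oversized file can never hash correctly, so report that first.
	if fi.Size() != wantSize {
		return fmt.Errorf("size mismatch: want %d bytes, got %d", wantSize, fi.Size())
	}
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != wantDigest {
		return fmt.Errorf("digest mismatch: want sha256:%s, got sha256:%s", wantDigest, got)
	}
	return nil
}
```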
@Donno191 commented on GitHub (Jan 6, 2024):
Same issue downloading multiple time.
Error: digest mismatch, file must be downloaded again: want sha256:949974ebf5978d3d2e232dee08cc6ebef273f7188731532aadc7eb46ce656dae, got sha256:a130abb4151ed85bdc93ba23a396ec4489de3f110e019d140bc37b95d22f835d
@Bortus-AI commented on GitHub (Jan 19, 2024):
still unresolved....how frustrating. Ran this 9 times so far and about to give up again
@Donno191 commented on GitHub (Jan 19, 2024):
I have a 14900K on Kubuntu 22.04. With 32 GB of RAM my checksum matches; if I increase the RAM to 128 GB, my checksum only matches in the first few minutes. I do not think it is an ollama error; I think it is an Ubuntu error. Can others confirm how much RAM and what OS, please?
@Bortus-AI commented on GitHub (Jan 19, 2024):
@Donno191 I have the issue on all of my dedicated servers. One running ubuntu and another running Debian. I haven't tried it on my fedora server yet but can try it when I get a moment.
@mhaustria2 commented on GitHub (Jan 21, 2024):
Same error with mistral and dolphin mixtral. It looks like it only happens with larger models
@iam-Justin commented on GitHub (Jan 22, 2024):
You are right, but I think it is not an OS problem. I found btrfs errors in my system logs, so I think it is a memory error. Even copying a model to another place would report a btrfs error. I replaced my memory module, and it works fine now.
I recommend that ollama split large files, to improve efficiency and reduce traffic consumption when repeating downloads.
@Donno191 commented on GitHub (Jan 22, 2024):
I have 4 sticks of 32 GB; I can put any one stick in my computer and have no problems. I still think it's the OS/kernel, or maybe the checksum library itself?
@Donno191 commented on GitHub (Jan 26, 2024):
Checksum works on 128 GB! It turned out to be undervoltage on my RAM; setting the RAM to the right voltage solved my problem.
@sage-khan commented on GitHub (Jan 28, 2024):
got this error while using mistral
@gautam-fairpe commented on GitHub (Feb 7, 2024):
This error occurred while pulling llama2 70B; other, smaller models work fine.
System config: 64 GB RAM
OS: Ubuntu
@Donno191 commented on GitHub (Feb 7, 2024):
Do memtest on RAM
@yswqq commented on GitHub (Feb 19, 2024):
I faced the same issues and finally found a way to pull models with ollama in a Docker environment.
I think there is no problem when we use the latest ollama version.
https://github.com/ollama/ollama/tree/v0.1.25
0.1.25 is the latest version (I think), and it can pull the llama2 model successfully.
Use these commands on your OS (such as Ubuntu with Docker installed), then go inside the Docker container and run this command.
Finally, you will see this sentence instead of the 'Error: digest mismatch ...' message.
Now you can run this kind of command to verify OpenAI compatibility, and this kind of response will be shown instead of 404 errors.
Guys, be happy!
@hongbinding commented on GitHub (Feb 27, 2024):
This issue still exists. No matter how many times I try, I get the same error: "digest mismatch".
Environment: ollama version 0.1.27
WSL Ubuntu
Any workaround or solution? Thanks!
@Donno191 commented on GitHub (Feb 27, 2024):
Download a GGUF model and import it into ollama. Here is an example importing whiterabbitneo. Make a file called Modelfile and insert the following:
```
FROM {insert the gguf path}
TEMPLATE """
{{- if .First }}
System:
{{ .System }}
{{- end }}
User:
{{ .Prompt }}
ASSISTANT:
"""
SYSTEM """
Answer the Question by exploring multiple reasoning paths as follows:
First, carefully analyze the question to extract the key information components and break it down into logical sub-questions. This helps set up the framework for reasoning. The goal is to construct an internal search tree.
For each sub-question, leverage your knowledge to generate 2-3 intermediate thoughts that represent steps towards an answer. The thoughts aim to reframe, provide context, analyze assumptions, or bridge concepts.
Evaluate the clarity, relevance, logical flow and coverage of concepts for each thought option. Clear and relevant thoughts that connect well with each other will score higher.
Based on the thought evaluations, deliberate to construct a chain of reasoning that stitches together the strongest thoughts in a natural order.
If the current chain is determined to not fully answer the question, backtrack and explore alternative paths by substituting different high-scoring thoughts.
Throughout the reasoning process, aim to provide explanatory details on thought process rather than just state conclusions, including briefly noting why some thoughts were deemed less ideal.
Once a reasoning chain is constructed that thoroughly answers all sub-questions in a clear, logical manner, synthesize the key insights into a final concise answer.
Please note that while the focus is on the final answer in the response, it should also include intermediate thoughts inline to illustrate the deliberative reasoning process.
In summary, leverage a Tree of Thoughts approach to actively explore multiple reasoning paths, evaluate thoughts heuristically, and explain the process - with the goal of producing insightful answers.
"""
PARAMETER temperature 0.3
PARAMETER top_k 30
PARAMETER top_p 0.8
```
Then run in the terminal: `ollama create whiterabbitneo-13b.Q8_0 -f Modelfile`
If the checksum does not work correctly, it is most probably your memory. Faulty memory will also affect your LLM models: they will write gibberish if your memory is not working properly.
@darlanalves commented on GitHub (Mar 22, 2024):
I see multiple comments that mention Docker, so keep in mind that this is not specific to Ollama itself.
I recently saw similar errors while pulling docker images, unrelated to Ollama.
This seems related to memory indeed. I'll get my RAM tested today.
Also of note: I tried to import llama2 from a Modelfile and a .gguf I downloaded earlier.
The tool will still run a sha256 checksum and fail.
That tells me it's not Ollama or Docker. It's likely a storage or RAM problem in my machine.
@hellkrusher commented on GitHub (Mar 27, 2024):
I'm running the Windows 11 version and getting the digest mismatch error when running `ollama run llama2:70b`.

@mhaustria2 commented on GitHub (Mar 27, 2024):
For everybody who has this issue on Windows with DDR5 RAM:
If you bought, for example, two 32 GB RAM modules and figured out later that you want another two 32 GB modules, you have to sell your first modules and buy a set of 4. Otherwise there is a big chance you run into these problems. It does not matter if you buy the same series from the same brand again; you have to have a complete matched set. You just cannot upgrade your RAM the way you were used to. I had a long discussion with Asus about this, and they confirmed that this is the exact issue. It gets much more obvious when you read through Reddit or search the internet for XMP profiles. Lots of people deactivate these profiles because they have issues when XMP profiles are activated. I talked with lots of friends, and all of them had XMP deactivated because their systems were unstable. You will find all kinds of solutions involving increasing voltage and so on. In the end, it's the modules that were not made to work together. Without the XMP profile they run stably enough to play games, but they can still have the issue we all suffered from here.
If you are not sure, run a RAM test. I could confirm getting RAM errors when I used two sets of the same model and brand. After I bought a set of 4, everything runs smoothly. I guess PCs are not that upgrade-friendly after all :)
@cirosec-ffr commented on GitHub (Apr 21, 2024):
Hello everyone,
I have a very powerful, newly installed machine (Ubuntu 22.04, AMD Ryzen 9 7900X, 64 GB DDR5) with a stable, high-bandwidth internet connection. Despite this, I was unable to download any model larger than ~15 GB without it being consistently corrupted. I was able to download smaller models, but only after retrying a few times. The behaviour was reproducible with a self-compiled binary, the Docker container, and a direct installation on the machine. Two different SSDs were tested, but the problem persisted.
I investigated the problem and found that in my case many downloaded parts over 200 MB in size were corrupted immediately after downloading. I also did a bindiff between the files (in this case llama2:latest), which showed that some random positions had some bits changed, but they didn't look like random bitflips (see attachment example.txt).
I suspect, the bug is located somewhere in the retry mechanism for the individual parts, but I was unable to fix the issue.
Dirty Fix
Since a part size below 200 MB worked fine for smaller downloads, I adjusted the constants in the code as follows:
And I made the following adjustment for the number of parallel downloads, which limits the parallel downloads to 32:
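The snippets referenced above did not survive the mirror. A hypothetical reconstruction of the kind of change described, reusing the constant names from ollama's server/download.go (format is ollama's byte-size helper package; the exact upstream values may differ), might look like:

```go
// Hypothetical reconstruction -- the commenter's actual diff was lost in the
// mirror. Constant names follow server/download.go; the values merely reflect
// the change described: parts kept well under 200 MB and at most 32 parallel
// part downloads.
const (
	numDownloadParts          = 32                    // cap on parallel part downloads
	minDownloadPartSize int64 = 32 * format.MegaByte  // keep every part well below
	maxDownloadPartSize int64 = 128 * format.MegaByte // the ~200 MB corruption threshold
)
```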
During my tests I have not noticed any impact on the performance of the downloads.
Also, I used to see the behaviour described in #3794, where a single slow download could cause extremely long download times for the remaining <1% of the download. This behaviour is now gone for me as well.
Reducing the number of parallel connections should also benefit users with slower internet connections (#3741 #3786).
To accommodate all users, it would maybe be an option to generally reduce the values or offer environment variables to control at least the number of parallel downloads. I can provide a pull request if that helps :) @mxyng
@daymade commented on GitHub (Apr 23, 2024):
thank you @0xffr, unfortunately this dirty fix doesn't work on my machine
@programmercitizen commented on GitHub (Apr 25, 2024):
THIS ISSUE is hurting the wider adoption and usage of Ollama.
I have confirmed with multiple systems on both Linux and on Windows that the larger models receive a handshake timeout and / or a digest mismatch.
NONE of the above suggested workarounds solve the issue.
I do not know of any way to get docker downloads to ignore the digest values, but maybe one of the ollama developers could allow it with a command line switch.
Ollama's developers could expose to end users the kind of command-line switch Docker has for governing the number of connections (i.e. --max-concurrent-downloads), to lower the chances of download corruption occurring.
When downloads do fail, exposing the pruning option for ollama would also be useful.
Last resort workaround (hammering cloudflare):
ollama run llama3:70b && ollama run llama3:70b && ollama run llama3:70b && ollama run llama3:70b
etc

@ksX-1 commented on GitHub (May 1, 2024):
I'm getting the same issue on my side. Running ubuntu 24.04 LTS, Nvidia drivers installed.
Yesterday I was having issues with the bigger-parameter models while smaller ones were fine. I did a complete re-installation of Ubuntu from scratch this morning, and I'm now getting the issue with smaller models too.
At the beginning I thought it was perhaps the security policies of my FortiGate firewall. Then I bypassed it and got the same issue.
It is really random, and I've tried several suggestions from this thread to no avail.
@sage-khan commented on GitHub (May 2, 2024):
Which version of Ollama are you using? @ksX-1
The latest version does not have this issue on my end.
However, do look into file permissions as well, because once this error is fixed, we get permission errors. So I made sure I'm using
This may also be due to internet issues. Packets may break.
@ksX-1 commented on GitHub (May 10, 2024):
Small update @sage-khan:
File permissions were good. I did another attempt (re-installed ollama again on Ubuntu 24.04), this time with version 0.1.34 (was running 0.1.33 previously).
I was able to download 9 models that same night:

However, the next morning the digest mismatch started again. I haven't been able to pull an additional model since.
I'm not sure how the files are set up on their end (load balancing with sticky sessions, where one of the servers might have corrupted files?) and I keep hitting the bad one.
@t29mato commented on GitHub (May 10, 2024):
I am also facing a similar issue on Ubuntu 24.04 LTS while trying to use ollama pull llama3. Despite attempting to re-pull the model more than 10 times, I am consistently encountering the following error and have not been able to download the model even once successfully:
@ravi-aii commented on GitHub (May 16, 2024):
I am also facing this error while trying to pull with ollama. I am installing on Windows 11.

Even re-pulling doesn't help me. Has anyone found a solution for this? I need help.
@tomcontr commented on GitHub (May 21, 2024):
Same here. Can there just be an option to skip the check?
I've tried 10 times to download mistral:8x22b (80 GB).
It works fine with llama2 or llama3 models (smaller ones).
@JonLaRue commented on GitHub (May 30, 2024):
I am plagued with the error as well with Llama 3:
@OldishCoder commented on GitHub (Jun 4, 2024):
Same sad story here...
Just installed the latest ollama (v0.1.40) image from Docker Hub.
Trying to pull deepseek-v2 (edit: I also tried some smaller models, same issue), I receive "Error: digest mismatch, file must be downloaded again: want sha256..."
Have tried 5 times now... the hash for the downloaded blobs is different each time...and does not match the target in any case
In the logs I do see lines like:
I have a fast connection. This is an enterprise workstation with ECC RAM (384 GB of it)...
Something in the download code seems broken or at least brittle...
@JonLaRue commented on GitHub (Jun 4, 2024):
I think this may be an activity that is blocked by our Enterprise IT overlords. I am having no issues on my Windows 10 home-network deployment. My work PC is Windows 11 and does employ a McAfee proxy server for filtering; this may be something that proxy server is blocking. Being able to skip the check via an additional command would be nice and would allow for wider adoption of Ollama.
@OldishCoder commented on GitHub (Jun 5, 2024):
Is there any way to directly download the models, and related files? Without ollama? Maybe a wget sequence?
With regard to @JonLaRue's comment about corporate IT and firewalls: yes, I am behind a firewall, but I do not have any issue downloading very large files from other sources, including the Hugging Face hub. Usually if IT blocks something for us, it is fully and truly blocked and does not work even partially...
@JonLaRue commented on GitHub (Jun 5, 2024):
@OldishCoder, same as you: I can download Llama 3 in 30 seconds, then fail the checksum. I am looking into how to download the model manually via Hugging Face.
@jtencioc101 commented on GitHub (Jun 6, 2024):
I am having this issue as well. I am running ollama on a server, and I found that after a restart I am able to download one model successfully, but a subsequent download will fail.
Not sure what is happening, but having to restart my server before attempting to pull a new model is no fun at all.
@rafazafar commented on GitHub (Jun 8, 2024):
This error seems to occur for both larger and smaller models: llama3:70b or phi3, same results. macOS 14.4.1 (23E224)
@tomcontr commented on GitHub (Jun 8, 2024):
Does anyone know how to recompile the code so we can disable that check until the issue gets resolved?
@papillon commented on GitHub (Jun 9, 2024):
I spent a few hours debugging this issue on my Windows/Linux dual-boot machine.
On Linux, it seems to be caused by a combination of how ollama uses golang to download the models and my setup. A sample golang program using a similar code structure to download parts of the model in parallel succeeds most (not 100%) of the time, while ollama fails almost always. As soon as a BAD RECORD MAC error appears, the download will be corrupted. Even replacing the elaborate download logic of ollama with the simplest possible golang HTTP request fails, apparently because it's running inside the ollama process.
A simple wget on the model's requestURL always succeeds, as do several attempts to download Linux ISO images and check their sha256 checksums: 100% success rate on those.
The same machine running ollama on Windows has no issues.
Also Ubuntu running on WSL2 on Windows has no issues.
So for me, it's not faulty memory or anything external like firewalls. It's something between Linux, golang and ollama.
@phillmorgan28 commented on GitHub (Jun 11, 2024):
Receiving the same issue when trying to download codellama:70b on Windows 11. The digest is never the same between each attempt.
@OldishCoder commented on GitHub (Jun 11, 2024):
Hi @papillon
Per your post:
How are you getting the model's requestUrl? I'm trying to understand if I can bypass the broken download code using wget.
@papillon commented on GitHub (Jun 11, 2024):
Hi @OldishCoder,
I have added the requestURL to the output generated in the Prepare() function in download.go like this (around line 140):

slog.Info(fmt.Sprintf("downloading %s from %s in %d %s part(s)", b.Digest[7:19], requestURL, len(b.Parts), format.HumanBytes(b.Parts[0].Size)))

If you run `ollama serve` and pull a model, you will see output like this:

source=download.go:141 msg="downloading dd0c6f2ea876 from https://registry.ollama.ai/v2/library/codegemma/blobs/sha256:dd0c6f2ea876e4c433325df3398386f24e00d321abf6cec197c1bc1fcf1e0025 in 16 100 MB part(s)"

@OldishCoder commented on GitHub (Jun 11, 2024):
Hey @papillon, thanks for the information... I also decided to see where the download was going on the filesystem, so I put a couple of prints in download.go, fn downloadBlob(): printing the file path fp at line 348 and requestURL at line 371... but...
I had been running the "official" Docker image and it would not complete a download... and I could not figure out how to substitute my own ollama build into the Docker container... so with the changes I made, I ended up running ollama inside my WSL2/Ubuntu environment instead, and it printed the information requested... but then the hash check passed?
So I am a bit confused. Is the Docker image faulty? Does adding print statements have a Schrödinger's cat effect? :)
I was able to pull and load deepseek-v2 and qwen2 successfully...
Since I compiled my own ollama executable on Ubuntu/WSL2 (Windows 10), I have not seen any download issues.
FWIW, using: go version go1.22.4 linux/amd64
Performed a git reset --hard, then a git pull and rebuilt... all seems fine now.
I suppose I could fall back to using wget in case I get stuck... but so far so good...
thank you!
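For anyone wanting the wget-style fallback discussed above, the registry URL pattern visible in @papillon's log line makes a standalone fetcher easy to sketch. The following Go program is illustrative only (the model name and digest are taken from that log line as an example); it hashes while downloading and reports a mismatch the same way ollama does:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
	"os"
	"strings"
)

// fetchBlob downloads one blob from the ollama registry and verifies its
// sha256 on the fly. The URL pattern is taken from the log line quoted above.
func fetchBlob(model, digest, dest string) error {
	url := fmt.Sprintf("https://registry.ollama.ai/v2/library/%s/blobs/%s", model, digest)
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %s for %s", resp.Status, url)
	}
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()
	// Hash while writing so the file never has to be re-read for verification.
	h := sha256.New()
	if _, err := io.Copy(io.MultiWriter(out, h), resp.Body); err != nil {
		return err
	}
	if got := "sha256:" + hex.EncodeToString(h.Sum(nil)); got != digest {
		return fmt.Errorf("digest mismatch: want %s, got %s", digest, got)
	}
	return nil
}

func main() {
	digest := "sha256:dd0c6f2ea876e4c433325df3398386f24e00d321abf6cec197c1bc1fcf1e0025"
	// ollama stores blobs on disk as sha256-<hex>; adjust dest as needed.
	dest := strings.Replace(digest, ":", "-", 1)
	if err := fetchBlob("codegemma", digest, dest); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```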
@Programmeurke commented on GitHub (Jun 19, 2024):
@papillon @OldishCoder, to add this extra logging to download.go on Mac, do you need to compile the Ollama app yourself using Xcode? Or is it simpler than that? I've never compiled code on my Mac, and I'm not sure it would work (the laptop is controlled by my company). Thanks!
@papillon commented on GitHub (Jun 21, 2024):
@Programmeurke I'm on Linux and can't give you specific tips for Mac. You might want to follow the developer guide.
Basically 1. setup dev tools 2. clone the ollama repo and 3. modify download.go
@jimbojw commented on GitHub (Jun 24, 2024):
I'm trying ollama for the first time on Ubuntu today, running into this issue on my first attempt at `ollama run llama3`. Is it possible to actually use ollama?
@papillon commented on GitHub (Jul 1, 2024):
After many hours of investigating this topic (and € spent), I concede that the root cause was a hardware issue. My processor (Ryzen 5900X) required extra voltage for one of its cores to function flawlessly. Identifying this was particularly challenging because typical stability tests like memtest86+ failed to reveal the problem. Additionally, it only manifested on Linux probably due to differences in how processor cores are utilized compared to Windows.
I found corecycler (https://github.com/sp00n/corecycler) extremely helpful with this. That's how I identified the faulty core. By applying extra voltage to that specific core through AMD's curve optimizer, I managed to achieve full stability for ollama download on Linux as well.
@JonLaRue commented on GitHub (Jul 1, 2024):
Hey there, I think the issue we're all working on is a digest mismatch; many of us are on Windows rather than Linux, and using Intel CPUs.
I have HWBot records for Ryzen 5800X and 5950X overclocking, and the stability issues you're experiencing sound like they were about general system stability rather than downloading and using the models, correct? I'm happy you got your Curve Optimizer configured properly.
@OldishCoder commented on GitHub (Jul 1, 2024):
In my case, on Windows (with WSL2/Ubuntu) using Docker, I get the digest mismatch error consistently with the official Docker image. The problem goes away entirely if I just compile my own version and run it in a WSL2/Ubuntu shell.
I have no idea if it's a toolchain issue, a library/version issue, or maybe a hardware issue, but there is most definitely an issue with the official Docker container on my machine.
I tried to build the full Docker image, but got stuck on some CentOS 7 yum-vs-dnf certificate issue in rh_linux_deps.sh... being fairly new to Docker, I got stumped. But since I could build and run natively, I just moved on...
@papillon commented on GitHub (Jul 2, 2024):
@JonLaRue, I am not sure why you are saying this. Of course we are talking about digest mismatches, and what the majority of people might use for CPUs and operating systems is of no relevance to this issue. You might want to read my post further up to get the full picture.
Sorry, but I do not know what HWBot is or why you are talking about overclocking. You seem to have misunderstood my post, or might not have read my previous posts for context. The problem I had was digest mismatches, and the reason in my case was not a software but a hardware issue (which only surfaced on ollama downloads on an otherwise "stable" system). I made this comment to make it clear that my initial suspicion (see above) was incorrect and that ollama is not to blame at all (again: in my case).
@buchbergerd commented on GitHub (Jul 22, 2024):
Is there a solution or a workaround for this? I tried `ollama pull llama3:70b` more than 15 times in a row, still with no success and a changing sha256 (got:...). That was on version 0.2.1. I tried 0.2.7, but there it just gets stuck at "verifying sha256 digest". With 0.2.6, I get the same sha256 errors again.

Is there a list of the URLs for manual download or something like that (without the need to add a print statement to the code)?
@papillon commented on GitHub (Jul 23, 2024):
In theory you could download the files without ollama and recreate the necessary files in the right places... but it's a bit involved. The easiest option for me was to disable the digest check in code. When I did that, I got a "successful" download, but the model was not usable (ollama crashed).
All in all, I would recommend that you try to fix your PC (e.g. fix your processor to its base clock to prevent boosting, and/or lower the RAM frequency). It's the most likely cause of this error according to my testing.
@gitsang commented on GitHub (Jul 25, 2024):
For reference, I previously deployed using Docker in a Debian 12 container in LXC, but I couldn't pull the model no matter what. However, when I switched to a VM deployment, everything worked fine. Some people also reported that changing the memory or processor voltage allowed them to pull successfully. I suspect this is indeed related to hardware or CPU instructions, but I still can't figure out what magic happened between them.
I think that when you are unsure how to fix the hardware, switching the deployment container or environment may help resolve the issue more quickly.
@hacker-szabo commented on GitHub (Aug 12, 2024):
I could not pull ANY models, so I returned nil in the function called "verifyBlob", which does the hash check. I built it based on development.md.
Now I can pull. And I can also use the official version with it.
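For context, the change described amounts to short-circuiting the verification function. A hypothetical sketch follows; the real verifyBlob in ollama's server package compares the blob's sha256 against the expected digest, and its exact signature may differ:

```go
// Hypothetical sketch of the patch described above, not the upstream code.
func verifyBlob(digest string) error {
	// WARNING: skipping verification means corrupt blobs go undetected and,
	// as noted earlier in the thread, may crash ollama at load time.
	return nil
}
```

As @papillon reported earlier, a download that "passes" this way may still be unusable.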
@jacdavi commented on GitHub (Aug 26, 2024):
I was behind a proxy and resolved it following this answer: https://github.com/ollama/ollama/issues/729#issuecomment-1906311485
@kr1ps commented on GitHub (Sep 6, 2024):
Hi folks,
Could this issue be related to the AMD Threadripper processor? While searching for this error, I noticed that a few other people with AMD Threadripper processors seem to have experienced the same problem. I am unable to download most models or create models from files. I've already tested this with two Debian-based OSes (Pop!_OS and Ubuntu 22.04) with no luck. I'm not behind any proxy, and models download just fine on my M2 MacBook Pro.
Here are the specs of my AI rig:
OS : Pop!_OS 22.04 LTS x86_64
Kernel : 6.9.3-76060903-generic
CPU : AMD Ryzen Threadripper 3970X (64) @ 3.700GHz
GPU : NVIDIA GeForce RTX 3090 (x2)
Memory : 128666MiB
@kilmarnock commented on GitHub (Sep 6, 2024):
Hi @kr1ps,
had the same problem with my Threadripper 3920X until I turned off all energy-saving functions in the BIOS. That brought my error rate down from 99% to zero.
You might find that giving your CPU and RAM more power, whatever the function is called in your system, could help as well. In my case, the function was called ASUS EPU (Energy Processing Unit), which I had to disable.
@kr1ps commented on GitHub (Sep 6, 2024):
Hi @kilmarnock ,
Thank you for the advice, but unfortunately, it didn't work. I've tried nearly every method related to disabling power saving. I'm currently using an ASRock TRX40 Creator motherboard
@Xyz00777 commented on GitHub (Sep 6, 2024):
Having the problem on an AMD EPYC 7551P with a Supermicro board, so possibly the same problem?
I'm using cpupower to set the energy governor to ondemand, but the lowest it sets is 1.2 GHz, which shouldn't cause any problems... I haven't had time yet to test setting it to max power or something like that.
@kilmarnock commented on GitHub (Sep 6, 2024):
I have heaps of options to slow down the RAM and processor, and to raise the voltage level for the RAM and processor, on my Asus PRIME X399-A. RAM should be the bottleneck here. You could try decreasing the RAM frequency in the BIOS.
@Xyz00777 commented on GitHub (Sep 7, 2024):
But the RAM inside my server is DDR4 with ECC... so it shouldn't be the RAM, because at least on my system errors should get corrected if they happen inside the RAM, yet the problem occurs anyway...
@rafazafar commented on GitHub (Sep 8, 2024):
The most reliable way for me on an M1 Mac behind an enterprise proxy/firewall was to rebuild ollama with numDownloadParts = 1 (server/download.go, line 97, at commit 06d4fba851).

It was surprisingly easy to do following the development docs.
@papillon commented on GitHub (Sep 9, 2024):
Try disabling boosting (PBO2) and thus fixing the frequency to its default value. This made my 5900X error-free with ollama (which of course meant that the CPU got sent back to the reseller like a hot potato). Since I switched to the replacement 5950X, everything is fine.
It seems ollama is one of the most sensitive stability tests for my machine. I even modified it so that it compares every downloaded chunk to a copy of the file that I know to be correct, so it errors immediately and not only after the download is done. That helped a lot in running these tests hundreds of times. Now the question is: how can I create a reproducible stability test out of this ;-)
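A minimal sketch of the fail-fast test rig @papillon describes, assuming a known-good copy of the blob is already available locally (the function name and signature are hypothetical):

```go
package chunkcheck

import (
	"bytes"
	"fmt"
	"os"
)

// checkChunk compares a freshly downloaded chunk against the same offset in a
// known-good reference copy, so corruption is caught the moment it happens
// rather than at the final digest check.
func checkChunk(reference *os.File, chunk []byte, offset int64) error {
	want := make([]byte, len(chunk))
	if _, err := reference.ReadAt(want, offset); err != nil {
		return err
	}
	if !bytes.Equal(chunk, want) {
		return fmt.Errorf("corruption detected in chunk at offset %d", offset)
	}
	return nil
}
```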
@Xyz00777 commented on GitHub (Sep 9, 2024):
Shouldn't something like this be implemented in general, so the download and checking process can be parallelized?
How did you do that?
@jimbojw commented on GitHub (Sep 9, 2024):
Downloading files shouldn’t be this hard. Truly. Solutions for sharing large files have existed for many years (BitTorrent for example).
How can we use a trustworthy, extra-process download system instead of these buggy internal downloaders?
@papillon commented on GitHub (Sep 9, 2024):
@Xyz00777:
The ollama repositories would have to provide a checksum for each chunk, which is not possible since the chunk size is more or less dynamically determined right before the download starts. In my special test case, I've downloaded the correct file beforehand to compare while the test download is happening. This is not something that can be implemented in general.
@jimbojw
I cannot speak for the ollama project, but it should be clear that it's not ollama's fault if the CPU or RAM is not reliable. And of course, one verified singular instance where the hardware was to blame (i.e. my case) does not mean it's always the hardware. It was in my case, but YMMV.
@jimbojw commented on GitHub (Sep 9, 2024):
Agreed that ollama can’t be responsible for userland instability.
However, the pervasiveness of the problem suggests to me that CPU/RAM issues cannot be the root cause for everyone. And if they are, then ollama ought to test for these issues and report them before initiating large downloads that are doomed to fail.
The Internet is built out of fault-tolerant layers on top of faulty underlying transportation media. TCP for example. Downloading content in chunks and checking said chunks for errors (and re-downloading them) is a well-studied problem with widely available solutions.
Downloading a whole 4-40GB file just to find out a byte somewhere was wrong (but not which one) ignores these existing solutions. Any user capable of running ollama (a command line program) is also capable of running an out-of-band, fault-tolerant downloader.
Sorry for the rant. I guess I would just encourage ollama to deprioritize downloading models directly from the CLI, and instead prioritize simplifying the workflow wherein users download model files themselves and load them in. (If this has already occurred, I apologize. Last time I played with ollama, I found it difficult to side-load a gguf).
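A sketch of the per-chunk verify-and-retry loop being advocated here, assuming a server that publishes a sha256 per chunk -- which, as @papillon notes above, the ollama registry does not. All names are hypothetical:

```go
package downloader

import (
	"crypto/sha256"
	"fmt"
	"io"
	"net/http"
)

// fetchChunkWithRetry downloads one byte range, verifies it against a
// per-chunk digest, and re-downloads only that range on mismatch.
func fetchChunkWithRetry(url string, start, end int64, want [32]byte, retries int) ([]byte, error) {
	for attempt := 0; attempt <= retries; attempt++ {
		req, err := http.NewRequest(http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			continue // transient network error: retry this chunk only
		}
		data, err := io.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil || resp.StatusCode != http.StatusPartialContent {
			continue
		}
		if sha256.Sum256(data) == want {
			return data, nil // verified: the rest of the file is untouched
		}
		// Digest mismatch: loop around and re-download just this byte range.
	}
	return nil, fmt.Errorf("chunk %d-%d failed verification after %d retries", start, end, retries)
}
```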
@kr1ps commented on GitHub (Sep 10, 2024):
Hi,
Just to give a little more context: I don't think it's a downloading problem, because I also tested downloading the models on another machine, exporting them, and then importing them into my main AI machine. With this, I got the same problem in 99% of the tests. It's a little frustrating, because I don't think my machine has any hardware problems or anything related, since other stuff like Docker works just fine.
@leyre-13 commented on GitHub (Sep 12, 2024):
Hi everyone! Hope you're doing well.
I've tried to download llama3.1:8b but I've got this error:
Error: digest mismatch, file must be downloaded again: want sha256:8eeb52dfb3bb9aefdf9d1ef24b3bdbcfbe82238798c4b918278320b6fcef18fe, got sha256:81b119cfd22937333c54e815a3a8158f8ff2a37a0f6a4213db6241ab6b949b62
I've tried several times; as I've read in your posts, some of you resolved the problem by re-downloading the model several times, but in my case that has turned out to be impossible.
My OS is Windows 11.
Can anyone tell me something to solve this?
Thanks in advance!
@OldishCoder commented on GitHub (Sep 12, 2024):
@leyre-13
It's not clear how you are invoking ollama or which version you are using...but...
I was trying to use the docker version and I kept getting the error you describe, nothing helped...
If I build the go executable directly in my WSL2 environment, it works fine in that same wsl2 environment.
@leyre-13 commented on GitHub (Sep 13, 2024):
Thank you @OldishCoder; below are the details you asked for:
ollama version is 0.3.10
First, I installed ollama for Windows, then I copied the command to download the model following the instructions on their website (ollama run llama3.1:8b), and also tried ollama pull llama3.1:8b because I want to use it with Python, and I always get the same error.
I was looking for other options similar to ollama, but I've seen that they need a GPU and my PC doesn't have one.
Any piece of advice?
@Leon-Sander commented on GitHub (Sep 14, 2024):
I am on Linux using the latest Docker image, getting the digest mismatch error the whole time. Tried multiple 7B models. Very annoying.
IMPORTANT OBSERVATION:
I switched to Windows and it suddenly worked with the Docker image, using the same setup.
After reading some comments above about RAM, here is something else I remembered.
When I downloaded files that were split into multiple rar archives, for example, on Linux I often encountered a similar kind of error, usually CRC errors, while it worked on Windows.
Asking ChatGPT led to three main areas of possible problems, with some solution suggestions:
Suggestions to Troubleshoot
I used smartctl and found no errors, and also performed fsck 5 minutes ago without problems. The network is stable, so it seems to come down to RAM, which was mentioned a lot earlier.
SOLUTION:
I just did a memtest and got many errors. Testing my RAM sticks separately with memtest86 revealed that the errors only occur for one stick, independent of its slot. After removing it and working with one stick only, I pulled multiple models in a row and haven't gotten any errors since. SO GO CHECK YOUR RAM WITH MEMTEST; many people here keep asking for a solution while RAM being the problem has been called out multiple times now.
@abdelmalek0 commented on GitHub (Sep 16, 2024):
Any working solutions for this problem?
I have no issue with small models like 7B, but anything above 5 GB causes the issue.
@jwierzch commented on GitHub (Sep 24, 2024):
Same issue; it seems to work sporadically but more often than not fails. I found it easier to just download the models from Hugging Face and follow https://github.com/ollama/ollama/blob/main/docs/import.md#Importing-a-model-from-Safetensors-weights for the Meta llama3.1 models. 🤷
@adrian77 commented on GitHub (Oct 8, 2024):
Let me first thank @Leon-Sander, who is indeed pointing us in the right direction by asking us to check RAM. At least for those of us with the following symptoms, which are definitively beyond the ollama download client:
Root cause
When I checked my RAM, I discovered that the specific address range 0x54DBDE678 to 0x54FBDF058 (around 32 MB) has a problem with bit 30 flipping 0 -> 1 and 1 -> 0. It is always the same bit for every address in that range. One of the tests was: i) write a value, ii) sleep 3 minutes, and iii) read the value back, which showed the value had changed by itself.
Address: 54DBDE678, Expected: 40000001, Actual: 00000001
Address: 54FBDE65C, Expected: 9ED915D0, Actual: DED915D0
How it affects download
Knowing that a bit in RAM flips randomly, let's say once per 10 minutes (for example), will not only corrupt data in memory, leading to processes crashing once in a while, but will also affect data that is written to disk or sent over the network.
When downloading a file, the data is first stored in RAM and then written to disk. If the bit flips between the loading and the writing, that corrupt byte affects the integrity of the file, which is checked at download completion. The larger the file (and in our case the models are large), the higher the chance of getting a bit flipped. So a download that takes hours always hits this problem; for smaller models I just need to try 2-3 times to pass the integrity check.
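To make concrete how a single flipped bit defeats the final check, here is a tiny self-contained demo (illustrative; the buffer stands in for a downloaded model blob):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	blob := make([]byte, 1<<20) // 1 MiB of zeros standing in for model data
	fmt.Printf("clean:   sha256:%x\n", sha256.Sum256(blob))

	blob[512000] ^= 1 << 6 // flip a single bit, as the faulty DIMM above does
	fmt.Printf("flipped: sha256:%x\n", sha256.Sum256(blob))
}
```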
Chip manufacturing problem or aging?
This is one of the things I am analysing, and I will visit the Apple Store today. I have had problems with my laptop since I bought it in 2020: a regular program crash every 3-6 months, OS downloads always failing (since they are usually 3-4 GB) but eventually succeeding, and now downloading a 70B model never works, which is where I am right now. There is evidence pointing to a chip manufacturing problem, given the consistency of which bit and address segment are affected. But similar things can also happen after a few years of computer usage. Apple's Diagnostics at boot, which takes three minutes to run, does not show any problems.
Reflections
I have never in 30 years experienced anything so dreadful. I have been suspicious about these rare issues in this laptop, but since I bought a new-generation MacBook Pro 16 when it came out in 2020, I was hoping the rare instabilities would go away with time and OS updates. They never really did. Now that I have started using ollama and downloading large models, I realise the laptop is pretty useless. Not only is downloading a problem, but when an 8-30 GB model is loaded into memory, the bit flips will affect the results, most likely silently. I can't imagine how a model will behave when a few bits flip. Perhaps the hallucinations will stop, or get worse :) Imagine not trusting your personal files to be written to disk. :(
Test RAM
@adrian77 commented on GitHub (Oct 8, 2024):
I would also like to share how to overcome this problem as a last resort, if your computer can't be repaired; in my case a 4-year-old laptop whose warranty has expired. The reading below is not for the faint of heart.
Evidence of problem
Executive summary
After running the failing memtest86 test, the HTML report showed me the lowest and highest faulty memory addresses.

By compiling a small EFI program that allocates that specific address range (32 MB in my case) and executing it in a rEFInd.efi chained context before booting the OS, the OS memory management system is prevented from using that faulty memory.
When running memtest86 again on a 256 MB address range around the 'sick' area, it shows passed results.
The memory is not available for allocation to memtest86, nor to a subsequently chained EFI OS, as long as everything is chained from the initial refind.efi.
The first attempt at downloading llama3.1:70B succeeded without any problems, as did the models which I tried later:

High-level steps, following the guides linked below:
1. Download refind.efi.
2. Download gnu-efi and compile it:
git clone https://git.code.sf.net/p/gnu-efi/code gnu-efi
3. Download and compile disable-ram-area.c to get disable-ram-area.efi:
git clone https://github.com/0nelight/macOS-Disable-RAM-Areas.git
4. Download Shell_full.efi.
5. Create an NSH script, disable-ram.nsh, that runs disable-ram-area.efi with the memory address range to reserve as arguments.
In my case: 'fs7:\EFI\BOOT\disable-ram-area.efi 0x54DBDE678 0x54FBDF058 3000'
6. Configure refind.conf with menu entries. The end goal is a chain that looks like this: refind.efi -> Shell_full.efi -> NSH script -> disable-ram-area.efi -> boot.efi (macOS or other).
7. Use balenaEtcher to write it all to a USB stick. I reused the MemTest86 image and directory structure and updated that.
8. Reboot the computer and boot from the USB stick.
9. Run the disable-memory entry and boot via rEFInd into the OS.
! Do not exit and reboot directly into the OS, because the memory will be handed back and become available to the OS again. Use the first menu to boot into your OS.
10. Download ollama models.
Guides that I used:
Disabling RAM addresses
Guide for MacBook Pro 16 - 2019
@Satya-codewalababa commented on GitHub (Oct 9, 2024):
I tried this way and it worked: I downloaded the required model from Hugging Face as a GGUF (e.g. llava or phi3), and then followed this blog. It worked; a simple and easy way. Do let me know if this is helpful:
https://dev.to/kamalhossain/how-to-run-already-downloaded-model-in-ollama-548e
@Leon-Sander commented on GitHub (Oct 10, 2024):
@adrian77 thank you for this detailed explanation. I simply removed the faulty RAM stick; good to know that you can also block access to faulty addresses. For me it was also quite surprising that this is the root cause, since my RAM was newly bought when all kinds of errors occurred; I had always thought it must be something OS-specific.
@adrian77 commented on GitHub (Oct 10, 2024):
@Leon-Sander Yep. Blaming hardware seemed like a long shot at first; it's just too rare to believe. But when I started to remember other issues, like an OS upgrade needing to be retried multiple times (again, downloading a large file) and random process crashes, it made sense to test the RAM.
I am pinging ticket opener @jmorganca and ticket owner @mxyng to help them move forward with the ticket.
People in the thread have analysed RAM, CPU, OS, disk, connectivity and alternative methods to download the large models.
@kr1ps commented on GitHub (Oct 11, 2024):
@adrian77
I confirmed that the problem for me was related to memory. In my case, I identified the damaged memory and removed it.
Everything works just fine now.
@ksX-1 commented on GitHub (Oct 20, 2024):
Last update: it's been a while, but I upgraded all of my RAM (bought 4x32 GB for a total of 128 GB; I used to have 16 GB as 2x8 GB) and since then I haven't had any problems at all. No issue whatsoever pulling new models since. Nothing changed on my rig except the memory, so I can confirm it was a memory issue on my side. Hope this helps.
@adrian77 commented on GitHub (Oct 21, 2024):
OK, I added your name too.
But the ticket owner seems not to be following this thread. :)
@sushibait commented on GitHub (Nov 14, 2024):
Same issue here. So far none of these fixes work for me. Running Ollama on a Win 11 Pro Workstation edition machine with a 14900K, 128 GB DDR5, all-NVMe storage, and an Nvidia RTX A6000. Tried disabling my firewall device. Tried plugging my machine directly into the internet. Tried uninstalling/reinstalling (normal removal, and Revo Uninstaller). Disabled and even tried uninstalling all anti-malware.
Can't make it work. It ONLY happens on larger models; right now the issue is with deepseek-coder-v2:16b-lite-base-fp16. I've probably tried it 10 times.
Edit: The machine is doing nothing else. All it runs is ollama.
@Leon-Sander commented on GitHub (Nov 15, 2024):
Do I understand correctly that you tried all the workarounds except the one fix at the root, which many people here have talked about: checking your RAM and removing the broken modules or blocking the broken addresses?
@adrian77 commented on GitHub (Jan 6, 2025):
@irboi746 the test you are performing is not conclusive. First, you allocate only 20 GB, meaning you might not hit the issue, since you have 64 GB and you don't know which part of memory is faulty. Second, you are running it under the OS using memtester, rather than exercising the library/kernel paths that read and write data to the filesystem; if the corruption happens in kernel libraries or kernel memory, your test will not see it. The proper way is to test the complete memory before the OS boots, so that all memory is exposed for testing.
Additionally, it's enough to have one bit wrong in the data, on either reading or writing from the network or disk (since all IO operations pass through RAM). And it's a function of file size, time and probability: the bigger the file, the longer the download, which increases the chance of hitting data corruption, because all of the data is checked together.
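To make the file-size/time/probability point concrete, here is a toy Go calculation; the once-per-10-minutes flip rate is only the example figure used earlier in the thread, not a measurement, and flips are modelled as a Poisson process:
// Toy model: if a faulty module flips a bit on average once every 10 minutes,
// the chance that at least one flip lands during a download is
// P(>=1 flip) = 1 - exp(-rate * time), which grows quickly with download time.
package main

import (
	"fmt"
	"math"
)

func main() {
	ratePerMin := 1.0 / 10.0 // example rate: one flip per 10 minutes
	for _, minutes := range []float64{5, 30, 120, 480} {
		p := 1 - math.Exp(-ratePerMin*minutes)
		fmt.Printf("%4.0f min download -> P(corruption) = %5.1f%%\n", minutes, 100*p)
	}
}
This matches the reported pattern: small models pass after 2-3 retries, while multi-hour downloads essentially never do.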
@tonmsaha commented on GitHub (Jan 16, 2025):
λ ollama run mxbai-embed-large
pulling manifest
pulling 819c2adf5ce6... 100% ▕██████████████████████████████████████████████████████████████████████████████▏ 669 MB
pulling c71d239df917... 100% ▕██████████████████████████████████████████████████████████████████████████████▏ 11 KB
pulling b837481ff855... 100% ▕██████████████████████████████████████████████████████████████████████████████▏ 16 B
pulling 38badd946f91... 100% ▕██████████████████████████████████████████████████████████████████████████████▏ 408 B
verifying sha256 digest
Error: digest mismatch, file must be downloaded again: want sha256:819c2adf5ce6df2b6bd2ae4ca90d2a69f060afeb438d0c171db57daa02e39c3d, got sha256:cf316b361af240aef27d281497170105d269adc479c0af2ca9f136ae5a2bb013
This issue persists on Windows 11 although the internet connection is good. Can't pull any model except all-minilm 🙂
@Leon-Sander commented on GitHub (Jan 16, 2025):
@tonmsaha check your ram
@scy-helbling commented on GitHub (Jan 16, 2025):
You can add me to the list: pulling any model > 9 GB always failed with a different sha256 digest. I suspected a faulty RAM module, as suggested here, and figured out which one by iteratively running memtest86 on each of them individually. Removed the failing one (16 GB, one of 4) -> pulling works like a charm again.
@deprec8ed commented on GitHub (Jan 17, 2025):
Will there be any official info on what to do about this? Is there anything I could do to overcome the issue? At first I thought it was a problem with a corporate proxy or something similar. I also can't use any external USB drives to verify the RAM, although it's a recently bought MacBook Pro M4 with 24 GB RAM. Is there any other approach I could try to verify whether the RAM is faulty? So far smollm:135m is the only model I have been able to get; every other model fails.
Specs:
MacBook Pro M4 Pro
12 Core CPU
16 Core GPU
24GB RAM
Sequoia 15.2
512GB SSD Storage
@jimbojw commented on GitHub (Jan 18, 2025):
@adrian77 said (emphasis mine)
This is the key problem. Rather than waiting until the whole file is downloaded and copied to find out that a bit flipped, a better approach would be to checksum the parts and re-download each bad part as it is discovered.
Instead of everyone having to ensure that they have pristine hardware, I would personally like to see integration with a robust, large-file download solution like BitTorrent. Or, if pristine hardware is an irreducible requirement, then incorporate a hardware checker into the setup/download process.
@adrian77 commented on GitHub (Jan 20, 2025):
@jimbojw sure, one can change the download protocol etc., but it will not change the fact that your computer, once it loads the file or its fragments into memory, will behave unpredictably. The post is about people getting upset about downloads not working, but the bigger harm is post-download: your LLM in memory will not behave as you want because of corrupt RAM. You will suffer from LLM dementia.
@jimbojw commented on GitHub (Jan 20, 2025):
That is a reasonable concern. Maybe ollama could incorporate some kind of memory check, or link out to another tool.
However, one should still not have to re-download whole enormous files in the case of a memory error. It should be feasible to independently checksum small portions of files and re-download just the corrupt segments.
Personally, I’d rather see ollama get out of the download business entirely. Make it easy for people to load models from disk, then let them use robust download tools like BitTorrent to acquire the files.
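For what it's worth, the per-chunk idea is easy to sketch. The following Go code is purely illustrative, not ollama's API: chunkSize, badChunks and the per-chunk digest list are all hypothetical, since the registry publishes a single digest per blob. It verifies a file in fixed-size chunks and reports which byte ranges would need re-fetching.
// Hypothetical sketch: verify a file against per-chunk SHA-256 digests and
// return the indices of chunks that would need to be re-downloaded.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

const chunkSize = 64 << 20 // 64 MiB per chunk (arbitrary choice)

func badChunks(path string, want []string) ([]int, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var bad []int
	buf := make([]byte, chunkSize)
	for i := range want {
		n, err := io.ReadFull(f, buf)
		if err != nil && err != io.ErrUnexpectedEOF {
			return nil, err // a short final chunk is fine; anything else is not
		}
		sum := sha256.Sum256(buf[:n])
		if hex.EncodeToString(sum[:]) != want[i] {
			bad = append(bad, i) // only bytes [i*chunkSize, i*chunkSize+n) need re-fetching
		}
	}
	return bad, nil
}

func main() {
	// usage: chunkcheck <file> <digest1> <digest2> ...
	bad, err := badChunks(os.Args[1], os.Args[2:])
	if err != nil {
		panic(err)
	}
	fmt.Println("chunks to re-download:", bad)
}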
@adrian77 commented on GitHub (Jan 20, 2025):
The first level of mitigation for these problems is handled at the hardware level; it's called ECC (but laptops and desktops with non-ECC memory don't have it). In the rare cases where memory bits flip, the ECC compensates for it. So normally there are no software-level corrections built in for dealing with these errors; Ollama, VirtualBox, video-editing tools and the OS don't do anything like that.
Yes, smaller files might get you thinking that something has improved, and in general I am not against it, because you can re-download the suspect fragments, as you said. But think of it this way. Say you have 10,000 files totalling 40 GB of data, you checksum them, and they are OK. Then assume you checksum all 10,000 files every single day, and every day one random file fails its checksum. Now you are getting suspicious, and instead of re-downloading you re-run the checksumming, but this time they all pass. How do you think about the problem now? My key question, the one I eventually boiled it all down to: can you trust your computer now?
The answer is no, hence people in this thread are ripping out their DIMMs or, like me, disabling RAM segments to stop the OS from using that malicious memory.
I am saying no because there are other use cases, unrelated to ollama, which are even worse:
You commit a file, and what is written downstream is not what is in your local repo.
You change your password, and what is fed into the hash and stored is not what you typed (one character changed value), locking you out of your computer.
@jimbojw I replied mainly because your statement about the key problem was based on my comment about large files. I was just saying that large files increase the likelihood compared to smaller models; splitting one large file into 10,000 smaller files doesn't change the likelihood of hitting the problem. I hope you understand what I meant?
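A toy check of that last point, assuming a fixed, independent per-byte corruption probability (the 1e-12 figure is invented for illustration): splitting the same 40 GB into 10,000 files leaves the overall chance of seeing at least one corrupted byte unchanged.
// Toy check: with a fixed per-byte corruption probability q, the chance of at
// least one corrupted byte depends only on the total byte count, not on how
// the bytes are split across files.
package main

import (
	"fmt"
	"math"
)

func main() {
	const q = 1e-12         // hypothetical per-byte corruption probability
	const totalBytes = 40e9 // 40 GB
	const files = 10000.0

	pWhole := 1 - math.Pow(1-q, totalBytes)
	pPerFile := 1 - math.Pow(1-q, totalBytes/files)
	pSplit := 1 - math.Pow(1-pPerFile, files)

	fmt.Printf("one 40 GB file:       P = %.6f\n", pWhole)
	fmt.Printf("10,000 smaller files: P = %.6f\n", pSplit)
}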
...
@byteconcepts commented on GitHub (Jan 24, 2025):
Hi again,
after reading through this issue again and now checking my RAM, I can say with certainty that faulty RAM is also the reason on my side.
That's highly annoying, because these are already a complete new set of 4 VENGEANCE RAM modules, bought as replacements for faulty ones.
It seems to me that we are all constantly getting ripped off when it comes to buying RAM.
yours Henri
@Xyz00777 commented on GitHub (Jan 24, 2025):
My system is all ECC, it's a server, and even there I have this issue: I downloaded a 14B model this afternoon 10 times until it worked, while another 32B model, tried after the 14B one succeeded, worked on the first try...
@byteconcepts commented on GitHub (Jan 25, 2025):
@Xyz00777: My system also has only DDR5 ECC RAM, but it is faulty nonetheless.
As I read it: the bigger the file, the more probable it is that an error MAY occur.
You should run the memtest86+ check to be sure.
Sometimes it seems to help to raise the DRAM voltage from 1.1V, but if you try that, be careful: I tried raising mine to 1.35V and now it doesn't boot anymore, not even into the BIOS.
I've been through this before and it's a real pain: I will, probably tomorrow, remove the 4090, then a fan from inside the cooler, then the cooler itself, to finally reach all the RAM modules; remove 3 of them; try to boot again to get back into the BIOS; set the voltage back down; and then try again. Maybe I will have to shuffle the RAM modules around until it boots again. :-(
Think twice about whether you really want to go through all that.
For that reason I will probably never build a server myself again, but buy a ready-to-run one, and the first thing I will then do is run memory checks from USB. If those fail, I will send the whole thing straight back for a replacement, until I have an error-free box.
Yours Henri
@byteconcepts commented on GitHub (Jan 26, 2025):
Hi again,
I took the road of disassembling my server to get to the memory modules and test all four of them individually.
Because my box didn't even boot into the BIOS after raising the DRAM voltage to 1.4V, I was at first only able to get back into the BIOS with a single RAM module installed.
After taking a closer look at the sticker on the RAM module, I found out that it said 1.2V, not 1.1V like the product's detail webpage!
So I lowered the voltage back to 1.22V, which should be fine because the product's detail webpage said "Tested with 1.25V".
Now I was able to start the server normally again and tested all four memory modules individually, and indeed one of them is actually broken: it produced thousands of errors on memtest86+'s last test.
It really is a shame that they seem not to do proper quality testing of memory modules and seem to be fine with constantly selling a sizeable portion of corrupt modules to customers.
As I already wrote, this set of four RAM modules IS ALREADY a brand-new replacement for the corrupted modules they sold me when I built the server!
And we also see that having ECC RAM does not always correct these errors!
Now that I have finally assembled the server again, I was able to pull a model...
...on the first try!
If the checksum keeps failing over multiple tries, it seems pretty certain that your RAM is broken!
If you are thinking "oh, maybe it's the stub DNS server of my Ubuntu box" (i.e. changing DNS servers in /etc/resolv.conf and then getting the correct checksum): if you can still resolve, for example, google.com, I strongly doubt that explanation, because of DNS caching.
Two things happening at the same time are not always related! Sometimes it's just pure coincidence.
Ah, and by the way: before this I got corrupted pixel grids in ComfyUI, which led to black images every nth generation at random, and it didn't seem to be related to any one checkpoint or LoRA.
The only thing that helped then was a complete reboot!
That problem seems to have gone away as well!
Like @adrian77 already wrote: "there are other use cases which are not related to ollama which are even worse"
Yours Henri
@adrian77 commented on GitHub (Jan 28, 2025):
@byteconcepts
One needs to do everything oneself. :)
Undervolting RAM causes 1s to drop to 0s. And it's not something you can isolate away by disabling segments, since it can happen on any memory cell. Good catch!
@deprec8ed commented on GitHub (Jan 28, 2025):
Can you recommend any way of checking the RAM on a MacBook with an M-series chip? I ran the built-in diagnostic tool, but it didn't return any errors. I checked, and memtest86 doesn't support Apple ARM chips.
@adrian77 commented on GitHub (Jan 28, 2025):
If I were you, I would go to the Apple Store and say "I suspect the machine has faulty RAM, can you please run full memory diagnostics". If there is a fault, you might get the motherboard replaced, provided the laptop is not too old.
I think Apple, with its closed unified RAM, the abandonment of UEFI and the introduction of secure boot, has made it quite hard for end users to discover potential HW problems. The same applies to NVMe and CPU issues.
@renxunsaky commented on GitHub (Jan 29, 2025):
Hi,
I had the same error on my work MacBook, which is behind a proxy.
There is no error on my personal MacBook. I'm using version 0.5.7 on both.
After changing numDownloadParts to 1 and increasing maxDownloadPartSize to a very big value, I don't have the problem anymore, though it slows down the download speed. But that's better than the never-ending mismatch error.
Perhaps there is a calculation problem with the parts? Or a problem when it merges the parts?
@doug62 commented on GitHub (Jan 30, 2025):
Same here
@raffaeleguidi commented on GitHub (Feb 1, 2025):
Same here. Behind a corporate proxy that performs HTTPS inspection. The download seems correct but doesn't pass digest verification. I guess the proxy decrypting and re-encrypting the SSL response screws up the overall digest calculation; maybe headers are included in the calculation (they should not be)? Could it simply be turned into a warning, letting us choose whether to take the risk?
@iefsu commented on GitHub (Feb 2, 2025):
Is there a way to disable hash checking?
@renxunsaky commented on GitHub (Feb 2, 2025):
It's a good point, but I don't think they include the certificate in the digest calculation, because when I changed to only one big download part, it worked.
@renxunsaky commented on GitHub (Feb 2, 2025):
From what I see in the code, no, you can’t disable it by an option.
@doug62 commented on GitHub (Feb 2, 2025):
This error comes from server/images.go in the ollama repo: method verifyBlob, line 818.
That method is called from method PullModel at line 583. Inside PullModel there is some code to skip verification:
if skipVerify[layer.Digest] {
	continue
}
But I'm not sure of the how/whys of that.
This logic might be faulty:
// GetSHA256Digest returns the SHA256 hash of a given buffer and returns it, and the size of buffer
func GetSHA256Digest(r io.Reader) (string, int64) {
	h := sha256.New()
	n, err := io.Copy(h, r)
	if err != nil {
		log.Fatal(err)
	}
	return fmt.Sprintf("sha256:%x", h.Sum(nil)), n
}
*** I WAS ABLE TO FIX THIS BY CHANGING THE IMAGE'S DNS TO GOOGLE, BTW, and I posted that fix earlier.
@deprec8ed commented on GitHub (Feb 2, 2025):
@doug62
I don't see any more posts from you on this thread other than the two above saying that you have the same issue.
For other people: I was also behind a corporate proxy, and changing numDownloadParts to 1 and maxDownloadPartSize to something like 30GB fixed the issue for most models up to 14B parameters. I still get the SHA256 digest mismatch if I try to download any model above ~30B parameters.
Though I didn't dig deep: I simply changed the verifyBlob func to return nil when a new environment variable I set up allowed a SHA bypass. Then I could indeed download any model, but it would behave in a weird and unpredictable way (throwing rows of random symbols, or just spewing random text). So I didn't go down that road. But there might be something worth looking into in doug's comment.
@doug62 commented on GitHub (Feb 2, 2025):
@Incloud3 - dang, guess I didn't hit save. I had to go into my ollama Docker container and change the DNS to Google, following https://www.youtube.com/watch?v=iSI-corX8HI, and it worked; it didn't work when I used my local/lab DNS. My Ollama runs fine within my Kubernetes cluster, but not well on Docker unless I do this.
I'm exploring how to download a local copy of the models and store them on my NAS/NFS/share; that might be interesting.
@raffaeleguidi commented on GitHub (Feb 2, 2025):
@doug62 I access the internet through a corporate proxy; the DNS of my Mac is internal and, like the proxy itself, managed by my organization, so it is so unlikely to be changeable that it's not even worth a try :) I am almost sure the problem comes from the inspecting proxy changing something in the headers, but this should not corrupt the data transferred; everything else that gets downloaded (even signed executables) works fine.
@doug62 commented on GitHub (Feb 2, 2025):
@raffaeleguidi Try running this command in your ollama image or VM if you can:
echo "nameserver 8.8.8.8" > /etc/resolv.conf # Thanks to -> https://www.youtube.com/watch?v=iSI-corX8HI
Your response gave me a thought: these models are stored a bit like Docker OCI images, where each layer has a SHA; the SHAs can differ between registries but are always the same on a given registry. If the ollama model servers did round-robin ingress/load balancing without respecting affinity cookies, and/or they did DNS load balancing and your SHA verification calls came back to a different server, the SHAs would mismatch. I wonder if that is it: model-server misconfiguration. If you are an admin on those ingress servers/DNS, ping me and I can verify with you.
@raffaeleguidi commented on GitHub (Feb 3, 2025):
As I told you, my Mac does NOT have direct internet access. I am behind many corporate firewalls, cannot contact any external DNS, and do not have sudoer privileges, so I cannot even change the resolv.conf file. I only access the internet through a corporate proxy, which makes changing the DNS irrelevant in any case.
And no, I am not an admin on those servers. Also, this problem shows up for each and every model I tried on my office Mac; everything works fine on my personal Mac with the same software levels but, of course, without the network restrictions.
@raffaeleguidi commented on GitHub (Feb 3, 2025):
I tried to but to no avail. How did you manage to switch to one part?
@renxunsaky commented on GitHub (Feb 3, 2025):
I changed the value in the file download.go:
https://github.com/ollama/ollama/blob/main/server/download.go#L97-L99
and rebuilt the binary on my enterprise Mac.
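For anyone wanting to try the same thing, the change goes in the constants near the top of server/download.go. A sketch of the patch renxunsaky describes; the stock values vary by version, so check the linked file before editing:
// Local patch sketch: force a single download part. The numbers here are
// illustrative, not the upstream defaults.
const (
	numDownloadParts          = 1         // was a larger value upstream
	minDownloadPartSize int64 = 100 << 20 // ~100 MiB
	maxDownloadPartSize int64 = 30 << 30  // ~30 GiB, big enough for one part
)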
@doug62 commented on GitHub (Feb 3, 2025):
@renxunsaky Points for rebuilding. You're forcing it to download in one part, which +1's my assumption that they have load balancer issues, I think.
@AlanMW commented on GitHub (Feb 9, 2025):
I am running ollama in a container, and I had to update the DNS nameserver to 1.1.1.1 inside the container's /etc/resolv.conf.
I just changed the line to
nameserver 1.1.1.1
I also paused blocking on my DNS (Pi-hole). Either of those fixed it, or it was a fluke.
@Xyz00777 commented on GitHub (Feb 9, 2025):
I will check if it works with another nameserver, but as additional input: I run rasdaemon on my server with ECC memory and EDAC, and when the hash issue happens I see no event in the rasdaemon log and database, even when I had 5 retries downloading the latest phi model...
Could it be that it's mostly on the CDN side, with faulty RAM on some machines in addition?
Because as far as I understood, the creation of the chunks is not (in my opinion) stable... like @papillon mentioned.
@doug62 commented on GitHub (Feb 10, 2025):
@Xyz00777 The SHA mismatch isn't hardware-related in that case: the error occurs when Ollama calls back to the model registry for a SHA and gets the wrong one, so it is on the CDN side, and I think it has to do with server affinity, load balancing, global DNS or something similar...
@programmercitizen commented on GitHub (Feb 10, 2025):
This is probably due to the resolving of the certificate servers (a similar issue to what the Adobe guys experienced last year)...
@raffaeleguidi commented on GitHub (Feb 10, 2025):
Please consider that client DNS servers are not involved in HTTP/HTTPS through a proxy. Also, please do not forget the people who have no choice and are FORCED to use a proxy.
@n0-ind3x commented on GitHub (Feb 12, 2025):
I have been trying to fix this for a week or two; thought I'd share what worked for me.
My Mac had both a transparent proxy AND a DNS proxy enabled. The issue was with the DNS proxy. I bypassed it by disabling the socket filter, and all models pulled or run from ollama now download just fine.
Settings > Network > VPN & Filters - check what's enabled under filters & proxies.
If you are on a corporate asset, these settings are most likely locked down.
@raffaeleguidi commented on GitHub (Feb 12, 2025):
I confirm they are locked down; likely for anyone in the same situation. But changing the client configuration is not the way to go. I understand that Ollama models are based on the same stuff Docker images are made of, and that their model registry is derived from the Docker registry server, yet I do not have any problem downloading Docker images. I would honestly get rid of this custom stuff and embrace a more standard tool, i.e. an Artifactory or Nexus binary repo, which would also have the plus of being mirrorable, something each and every corporation needs.
@Xyz00777 commented on GitHub (Feb 13, 2025):
I also had zero problems when I needed to download 5+ GB Docker images multiple times a day inside the same VM on the same hypervisor...
@raffaeleguidi commented on GitHub (Feb 13, 2025):
Yeah, I think the issue could safely be renamed to "downloading models through an inspecting proxy does not work".
@Xyz00777 commented on GitHub (Feb 16, 2025):
I'm not using a proxy but I'm still having the problem, sooo... 😅
@harisnae commented on GitHub (Mar 19, 2025):
Trying to pull phi4:14b-fp16: it takes forever on "verifying sha256 digest" and then gives a digest mismatch error.
@DikshitRJ commented on GitHub (Mar 26, 2025):
It's been a year and a half and this issue still hasn't been fixed. I just got the same error while downloading Gemma 2. The worst part about this error is that nobody knows why it is happening.
@chakrateja70 commented on GitHub (Mar 26, 2025):
Error: digest mismatch, file must be downloaded again: want sha256:ff82381e2bea77d91c1b824c7afb83f6fb73e9f7de9dda631bcdbca564aa5435, got sha256:6c6086752a59ace6de170e8f349e96d417e47935d79bf5d19ede6cba3772be3d
How do I fix this error?
@harisnae commented on GitHub (Mar 26, 2025):
So far I have not been able to fix this issue. First it takes too long to verify the sha256 hashes, and then it throws a digest mismatch error. As an alternative I have tried litgpt, but it has its own issues.
@chakrateja70 commented on GitHub (Mar 27, 2025):
For me it's working by:
1. changing networks and pulling again, or
2. restarting the system and pulling again.
@MohammadBnei commented on GitHub (Apr 16, 2025):
Ollama still hasn't resolved this issue? This is crazy. I cannot add any new models.
@GearUnclear commented on GitHub (Apr 21, 2025):
It fixed itself one day, randomly. Something fixed it; it could've been IT for all I know.
@darlanalves commented on GitHub (Apr 22, 2025):
At this point I doubt everyone here is facing the same issue.
But just in case: if you have some sort of memory test program on your OS, do try it.
For me, memtest with multiple restarts and memory swaps let me narrow down my case: a bad memory slot on the motherboard.
If you don't have a memtest program but you do have sha256sum in your terminal, try hashing any large file you have around. Do it a few times and see if the hashes always match.
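The same self-check is a few lines of Go if sha256sum isn't available; a minimal sketch that hashes the given file five times (on healthy hardware every pass must produce the identical digest):
// Minimal sketch: hash one large file repeatedly; differing digests across
// passes point at RAM (or disk) problems rather than the network.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

func main() {
	path := os.Args[1] // usage: hashcheck <large-file>
	var first string
	for pass := 1; pass <= 5; pass++ {
		f, err := os.Open(path)
		if err != nil {
			panic(err)
		}
		h := sha256.New()
		if _, err := io.Copy(h, f); err != nil {
			panic(err)
		}
		f.Close()
		sum := hex.EncodeToString(h.Sum(nil))
		fmt.Printf("pass %d: %s\n", pass, sum)
		if first == "" {
			first = sum
		} else if sum != first {
			fmt.Println("MISMATCH: suspect faulty RAM or storage")
		}
	}
}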
@BenAlabaster commented on GitHub (Jun 5, 2025):
I just used up my entire month's Starlink bandwidth allotment downloading deepseek-r1:671b twice to try to solve this problem, so that's it for the month: no more internet until July 2nd because of this issue. That's about as irritating as it gets. I'm so annoyed right now. And not only that: because the SHA check failed, it deletes the file, so I can't even try to recover it somehow... Thanks Ollama, thanks a bunch. This sucks.
@MohammadBnei commented on GitHub (Jun 12, 2025):
Can't we download them separately and add the models to ollama manually?
@AlpineVibrations commented on GitHub (Jun 19, 2025):
Yeah, it's failing here too on high-speed internet. Is there a way to download them over HTTP?
@BenAlabaster commented on GitHub (Jun 19, 2025):
I had to do a little reverse engineering and wrote a Powershell script for it… but you don’t have to go to that length:
It’s not a completely straightforward process, but it is quite easy if you hack through it a bit:
MANIFEST FILE
The manifest file that tells you all the files you need to pull down is found at the following URL… you have to sub the relevant info into the URL for the model you want:
https://registry.ollama.ai/v2/library/<model_name>/manifests/<version>
The model name in my case was deepseek-r1 and the version I was after was 671b; you can sub latest for the most recent one. The most recent one isn't always the most popular, though; it's usually more helpful to choose a specific version. So just sub those into the URL like so:
https://registry.ollama.ai/v2/library/deepseek-r1/manifests/671b
That will give you the manifest, which contains the SHA filenames you need… once you have them, you need a different URL to pull the blobs down:
BLOB FILES
https://registry.ollama.ai/v2/library/<model_name>/blobs/<sha256 digest>
You'll notice it doesn't need the version for the blobs, because they're all stored together in the blobs directory.
For deepseek-r1:671b, you need the filename sha256:439dd1a5e05286918f54941f49f9b56118c757440f6333f67f1cd5cbb5c8520b
So the complete URL is:
https://registry.ollama.ai/v2/library/deepseek-r1/blobs/sha256:439dd1a5e05286918f54941f49f9b56118c757440f6333f67f1cd5cbb5c8520b
You can download the other files from the manifest that way too.
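Scripting the two steps above is straightforward. A minimal Go sketch, assuming the registry serves a Docker-style image manifest (which the URLs above suggest); error handling is kept minimal:
// Minimal sketch: fetch a model's manifest from registry.ollama.ai and print
// the blob URLs (config + layers) that make up the model.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type manifest struct {
	Config struct {
		Digest string `json:"digest"`
	} `json:"config"`
	Layers []struct {
		Digest string `json:"digest"`
		Size   int64  `json:"size"`
	} `json:"layers"`
}

func main() {
	model, version := "deepseek-r1", "671b"
	resp, err := http.Get(fmt.Sprintf("https://registry.ollama.ai/v2/library/%s/manifests/%s", model, version))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var m manifest
	if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
		panic(err)
	}
	fmt.Printf("https://registry.ollama.ai/v2/library/%s/blobs/%s\n", model, m.Config.Digest)
	for _, l := range m.Layers {
		fmt.Printf("https://registry.ollama.ai/v2/library/%s/blobs/%s (%d bytes)\n", model, l.Digest, l.Size)
	}
}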
NOTES: BEFORE YOU ATTEMPT TO RUN IT IN OLLAMA
Do the SHA256 verification yourself manually, because if ollama tries to open the file and the SHA256 checksum doesn't match, it will delete the file unrecoverably and you'll have to download it again, which is a pain in the ass if the model you're downloading is large.
POWERSHELL
$computedHash = Get-FileHash -Path $outputFile -Algorithm SHA256
if ($computedHash.Hash -ne $file.sha256) {
Write-Host "❌ Hash mismatch for $outputFile"
} else {
Write-Host "✔️ Verified: $outputFile"
}
BASH
# expected_hash: the digest from the manifest, without the "sha256:" prefix
computed_hash=$(sha256sum "$outputFile" | awk '{ print $1 }')
if [ "$computed_hash" != "$expected_hash" ]; then
echo "❌ Hash mismatch for $outputFile"
else
echo "✔️ Verified: $outputFile"
fi
OTHER NOTES
ollama crashed when I tried to run it.
It did fire up and start loading, but I gave it one prompt and, before it got through thinking, it blue-screened everything.
I've got a GeForce RTX 5090 w/32GB GDDR7, 192GB DDR5 and an Intel 14900K in a ROG MAXIMUS DARK HERO running a stable overclock at 6GHz, plus CUDA 12.9/Blackwell, so this machine is no slouch.
Not sure whether that will play into your decision to wait for it to download, as it's 404GB.
@BenAlabaster commented on GitHub (Jun 19, 2025):
Additionally, I found this helpful for deciding which models I should take a chance on vs. those I should probably leave until I've got richer sponsors:
https://dev.to/askyt/deepseek-r1-671b-complete-hardware-requirements-optimal-deployment-setup-2e48
@raffaeleguidi commented on GitHub (Aug 6, 2025):
Using @BenAlabaster's analysis (thanks, I've been too lazy for that :D ) I created a little project to automate model downloads:
https://github.com/raffaeleguidi/Ollama-model-downloader
It's just getting started: it still needs Node.js and npm install to run (I will package it as a binary) and it isn't well tested, but it seems to do the job and I will improve it over time.
@raffaeleguidi commented on GitHub (Aug 7, 2025):
I added compiled binaries for everyone's convenience. Enjoy!
@anirbanbasu commented on GitHub (Aug 21, 2025):
@BenAlabaster, thanks for the idea. I have also created a Python version of a downloader tool at https://github.com/anirbanbasu/ollama-downloader. I have now deprecated the Python version for the Rust equivalent: https://github.com/anirbanbasu/odir.
@devzom commented on GitHub (Oct 2, 2025):
I've found this workaround, and it works correctly for any model you want to download: how-to-workaround-ollama-pull-issues
@Goekdeniz-Guelmez commented on GitHub (Apr 17, 2026):
I'm having the same issue too, using the native macOS install.