Mirror of https://github.com/ollama/ollama.git (synced 2026-05-08 08:59:13 -05:00)
Closed · opened 2026-04-28 19:42:10 -05:00 by GiteaMirror · 3 comments
Reference: github-starred/ollama#51367
Originally created by @AncientMystic on GitHub (Nov 11, 2024).
Original GitHub issue: https://github.com/ollama/ollama/issues/7610
While testing different models, mainly Gemma 2, I have been receiving a lot of blank responses (no line, no spacing, no characters at all). Usually a few regenerations fix it, but sometimes it takes quite a few (it once took 60 regenerations on my laptop instance before it moved on and generated a response). I thought it might have been open-webui or something I had configured, but I have now run into it in the terminal with ollama directly. So I think this is either a bug in the new ollama or in how it handles Gemma 2 and a few other models, which I believe are also Gemma-based.
(It doesn't always happen, just randomly: sometimes it's the 1st or 2nd response, sometimes the 15th. Random zero-byte responses come up here and there.)
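A minimal repro sketch of what I mean, assuming an ollama server on the default localhost:11434 and a Gemma 2 tag already pulled; the model tag, prompt, and attempt count here are placeholders, not exact values from my runs:

```python
# Hypothetical repro: hit /api/generate repeatedly and count blank completions.
# Assumes ollama is listening on the default localhost:11434 and that the
# gemma2:9b tag (a placeholder) has already been pulled.
import json
import urllib.request

URL = "http://localhost:11434/api/generate"
MODEL = "gemma2:9b"  # placeholder model tag

empty = 0
attempts = 20
for i in range(attempts):
    body = json.dumps({"model": MODEL, "prompt": "write a sentence",
                       "stream": False}).encode()
    req = urllib.request.Request(URL, data=body,
                                 headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req) as resp:
        text = json.load(resp).get("response", "")
    if not text.strip():
        empty += 1
        print(f"attempt {i}: zero-byte response")
print(f"{empty}/{attempts} responses were blank")
```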
@rick-github commented on GitHub (Nov 11, 2024):
Server logs may help in debugging. Examples of prompts, information about the system, etc. will also help.
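For more detail, OLLAMA_DEBUG=1 can be set before starting the server. A sketch of one way to capture that output, assuming `ollama serve` is launched by hand (on Windows the tray app needs to be quit first) and with the log path as a placeholder:

```python
# Sketch: start ollama with debug logging enabled and capture the output.
# Assumes `ollama` is on PATH and no other server instance is running;
# "server-debug.log" is a placeholder path.
import os
import subprocess

env = dict(os.environ, OLLAMA_DEBUG="1")
with open("server-debug.log", "wb") as log:
    subprocess.run(["ollama", "serve"], env=env, stdout=log, stderr=log)
```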
@AncientMystic commented on GitHub (Nov 11, 2024):
It is occurring on two separate systems. One is my laptop: Windows 10 with a fresh install of the latest ollama, and open-webui in Docker.
The other, my main setup, is my server/workstation running on Proxmox with vGPU in a Windows 10 (now 11) guest OS.
The prompts have been random, some as simple as "hello" or "write a sentence" / "write a paragraph" as test prompts.
As I said in the initial post, it happened in the terminal the last time, so I now know open-webui and anything on top of ollama are not the issue either; I wanted to make sure I eliminated that possibility first.
Here are some details.

Server setup:
Host OS: Proxmox 8.2.7
Hardware:
CPU: i7-7820X
RAM: 96GB DDR4 2133 MHz
GPU 1: GTX 1060 3GB
GPU 2: Intel Arc A310 4GB
GPU 3: Tesla P4 8GB (used for ollama)
Main drive: 1TB WD Blue SN550 NVMe
2nd drive: 1TB WD Blue SA510
3rd/4th drives: 10TB HGST SATA enterprise HDD
5th drive: 12TB HGST SATA enterprise HDD
Guest OS: Windows 11 24H2
CPU: full core allocation (100% of the host CPU)
RAM: 60GB
vGPU: 8GB from the Tesla P4
Ollama runs in the guest VM, compiled to use AVX512 and K/V quantization plus a few other tweaks/fixes (which is also why I test it on my laptop without any of these, to make sure none of them are the root issue).
open-webui runs in Docker inside an LXC on the Proxmox host.
server.log from the server workstation, captured just after it happened again:
server.log
2024/11/11 18:16:54 routes.go:1189: INFO server config env="map[CUDA_VISIBLE_DEVICES: GPU_DEVICE_ORDINAL: HIP_VISIBLE_DEVICES: HSA_OVERRIDE_GFX_VERSION: HTTPS_PROXY: HTTP_PROXY: NO_PROXY: OLLAMA_CACHE_TYPE_K:q8_0 OLLAMA_CACHE_TYPE_V:q8_0 OLLAMA_DEBUG:false OLLAMA_FLASH_ATTENTION:true OLLAMA_GPU_OVERHEAD:0 OLLAMA_HOST:http://0.0.0.0:11434 OLLAMA_INTEL_GPU:true OLLAMA_KEEP_ALIVE:5m0s OLLAMA_LLM_LIBRARY: OLLAMA_LOAD_TIMEOUT:5m0s OLLAMA_MAX_LOADED_MODELS:0 OLLAMA_MAX_QUEUE:512 OLLAMA_MODELS:C:\\Users\\VMZ\\.ollama\\models OLLAMA_MULTIUSER_CACHE:false OLLAMA_NOHISTORY:false OLLAMA_NOPRUNE:false OLLAMA_NUM_PARALLEL:0 OLLAMA_ORIGINS:[http://localhost https://localhost http://localhost:* https://localhost:* http://127.0.0.1 https://127.0.0.1 http://127.0.0.1:* https://127.0.0.1:* http://0.0.0.0 https://0.0.0.0 http://0.0.0.0:* https://0.0.0.0:* app://* file://* tauri://* vscode-webview://*] OLLAMA_SCHED_SPREAD:true OLLAMA_TMPDIR: ROCR_VISIBLE_DEVICES:]"
time=2024-11-11T18:16:54.234Z level=INFO source=images.go:754 msg="total blobs: 310"
time=2024-11-11T18:16:54.309Z level=INFO source=images.go:761 msg="total unused blobs removed: 0"
[GIN-debug] [WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached.
[GIN-debug] [WARNING] Running in "debug" mode. Switch to "release" mode in production.
- using env: export GIN_MODE=release
- using code: gin.SetMode(gin.ReleaseMode)
[GIN-debug] POST /api/pull --> github.com/ollama/ollama/server.(*Server).PullHandler-fm (5 handlers)
[GIN-debug] POST /api/generate --> github.com/ollama/ollama/server.(*Server).GenerateHandler-fm (5 handlers)
[GIN-debug] POST /api/chat --> github.com/ollama/ollama/server.(*Server).ChatHandler-fm (5 handlers)
[GIN-debug] POST /api/embed --> github.com/ollama/ollama/server.(*Server).EmbedHandler-fm (5 handlers)
[GIN-debug] POST /api/embeddings --> github.com/ollama/ollama/server.(*Server).EmbeddingsHandler-fm (5 handlers)
[GIN-debug] POST /api/create --> github.com/ollama/ollama/server.(*Server).CreateHandler-fm (5 handlers)
[GIN-debug] POST /api/push --> github.com/ollama/ollama/server.(*Server).PushHandler-fm (5 handlers)
[GIN-debug] POST /api/copy --> github.com/ollama/ollama/server.(*Server).CopyHandler-fm (5 handlers)
[GIN-debug] DELETE /api/delete --> github.com/ollama/ollama/server.(*Server).DeleteHandler-fm (5 handlers)
[GIN-debug] POST /api/show --> github.com/ollama/ollama/server.(*Server).ShowHandler-fm (5 handlers)
[GIN-debug] POST /api/blobs/:digest --> github.com/ollama/ollama/server.(*Server).CreateBlobHandler-fm (5 handlers)
[GIN-debug] HEAD /api/blobs/:digest --> github.com/ollama/ollama/server.(*Server).HeadBlobHandler-fm (5 handlers)
[GIN-debug] GET /api/ps --> github.com/ollama/ollama/server.(*Server).PsHandler-fm (5 handlers)
[GIN-debug] POST /v1/chat/completions --> github.com/ollama/ollama/server.(*Server).ChatHandler-fm (6 handlers)
[GIN-debug] POST /v1/completions --> github.com/ollama/ollama/server.(*Server).GenerateHandler-fm (6 handlers)
[GIN-debug] POST /v1/embeddings --> github.com/ollama/ollama/server.(*Server).EmbedHandler-fm (6 handlers)
[GIN-debug] GET /v1/models --> github.com/ollama/ollama/server.(*Server).ListHandler-fm (6 handlers)
[GIN-debug] GET /v1/models/:model --> github.com/ollama/ollama/server.(*Server).ShowHandler-fm (6 handlers)
[GIN-debug] GET / --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func1 (5 handlers)
[GIN-debug] GET /api/tags --> github.com/ollama/ollama/server.(*Server).ListHandler-fm (5 handlers)
[GIN-debug] GET /api/version --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func2 (5 handlers)
[GIN-debug] HEAD / --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func1 (5 handlers)
[GIN-debug] HEAD /api/tags --> github.com/ollama/ollama/server.(*Server).ListHandler-fm (5 handlers)
[GIN-debug] HEAD /api/version --> github.com/ollama/ollama/server.(*Server).GenerateRoutes.func2 (5 handlers)
time=2024-11-11T18:16:54.333Z level=INFO source=routes.go:1236 msg="Listening on [::]:11434 (version 0.0.0)"
time=2024-11-11T18:16:54.352Z level=INFO source=common.go:49 msg="Dynamic LLM libraries" runners="[cpu_avx2 cpu_avx512 cuda_v12 rocm cpu cuda_v11 cuda_v12_avx cuda_v12_avx,_avx2,_avx512 cpu_avx]"
time=2024-11-11T18:16:54.352Z level=INFO source=gpu.go:221 msg="looking for compatible GPUs"
time=2024-11-11T18:16:54.353Z level=INFO source=gpu_windows.go:167 msg=packages count=1
time=2024-11-11T18:16:54.353Z level=INFO source=gpu_windows.go:214 msg="" package=0 cores=16 efficiency=0 threads=16
time=2024-11-11T18:16:56.273Z level=INFO source=gpu.go:326 msg="detected OS VRAM overhead" id=GPU-c707ca87-9ffc-11ef-acd4-9c4a84a45058 library=cuda compute=6.1 driver=12.2 name="GRID P40-8A" overhead="363.1 MiB"
time=2024-11-11T18:16:59.992Z level=INFO source=types.go:123 msg="inference compute" id=GPU-c707ca87-9ffc-11ef-acd4-9c4a84a45058 library=cuda variant=v12 compute=6.1 driver=12.2 name="GRID P40-8A" total="8.0 GiB" available="6.6 GiB"
time=2024-11-11T18:16:59.993Z level=INFO source=types.go:123 msg="inference compute" id=0 library=oneapi variant="" compute="" driver=0.0 name="\xf7\x7f" total="3.9 GiB" available="3.7 GiB"
time=2024-11-11T19:45:10.249Z level=INFO source=sched.go:730 msg="new model will fit in available VRAM, loading" model=C:\Users\VMZ\.ollama\models\blobs\sha256-fb3b66c7bdf6dabbb2edbc22627f4cb2df021c9e9545b54feafd8a7c09fe8ec5 library=cuda parallel=1 required="1.1 GiB"
time=2024-11-11T19:45:10.342Z level=INFO source=server.go:106 msg="system memory" total="54.0 GiB" free="48.2 GiB" free_swap="63.8 GiB"
time=2024-11-11T19:45:10.342Z level=INFO source=memory.go:354 msg="offload to cuda" layers.requested=-1 layers.model=25 layers.offload=25 layers.split="" memory.available="[6.6 GiB]" memory.gpu_overhead="0 B" memory.required.full="1.1 GiB" memory.required.partial="1.1 GiB" memory.required.kv="12.0 MiB" memory.required.allocations="[1.1 GiB]" memory.weights.total="589.2 MiB" memory.weights.repeating="529.6 MiB" memory.weights.nonrepeating="59.6 MiB" memory.graph.full="32.0 MiB" memory.graph.partial="32.0 MiB"
time=2024-11-11T19:45:10.345Z level=INFO source=server.go:305 msg="Flash attention not enabled"
time=2024-11-11T19:45:10.353Z level=INFO source=server.go:467 msg="starting llama server" cmd="C:\\Users\\VMZ\\AppData\\Local\\Programs\\Ollama\\lib\\ollama\\runners\\cuda_v12\\ollama_llama_server.exe --model C:\\Users\\VMZ\\.ollama\\models\\blobs\\sha256-fb3b66c7bdf6dabbb2edbc22627f4cb2df021c9e9545b54feafd8a7c09fe8ec5 --ctx-size 2048 --batch-size 512 --embedding --n-gpu-layers 25 --threads 16 --no-mmap --parallel 1 --port 53041"
time=2024-11-11T19:45:10.503Z level=INFO source=sched.go:449 msg="loaded runners" count=1
time=2024-11-11T19:45:10.503Z level=INFO source=server.go:646 msg="waiting for llama runner to start responding"
time=2024-11-11T19:45:10.504Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server error"
time=2024-11-11T19:45:15.042Z level=INFO source=runner.go:845 msg="starting go runner"
time=2024-11-11T19:45:15.043Z level=INFO source=runner.go:846 msg=system info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | cgo(gcc)" threads=16
time=2024-11-11T19:45:15.044Z level=INFO source=.:0 msg="Server listening on 127.0.0.1:53041"
llama_model_loader: loaded meta data with 20 key-value pairs and 389 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-fb3b66c7bdf6dabbb2edbc22627f4cb2df021c9e9545b54feafd8a7c09fe8ec5 (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = bert
llama_model_loader: - kv 1: general.name str = snowflake-arctic-embed-l
llama_model_loader: - kv 2: bert.block_count u32 = 24
llama_model_loader: - kv 3: bert.context_length u32 = 512
llama_model_loader: - kv 4: bert.embedding_length u32 = 1024
llama_model_loader: - kv 5: bert.feed_forward_length u32 = 4096
llama_model_loader: - kv 6: bert.attention.head_count u32 = 16
llama_model_loader: - kv 7: bert.attention.layer_norm_epsilon f32 = 0.000000
llama_model_loader: - kv 8: general.file_type u32 = 1
llama_model_loader: - kv 9: bert.attention.causal bool = false
llama_model_loader: - kv 10: bert.pooling_type u32 = 2
llama_model_loader: - kv 11: tokenizer.ggml.token_type_count u32 = 2
llama_model_loader: - kv 12: tokenizer.ggml.model str = bert
llama_model_loader: - kv 13: tokenizer.ggml.tokens arr[str,30522] = ["[PAD]", "[unused0]", "[unused1]", "...
llama_model_loader: - kv 14: tokenizer.ggml.token_type arr[i32,30522] = [3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 15: tokenizer.ggml.unknown_token_id u32 = 100
llama_model_loader: - kv 16: tokenizer.ggml.seperator_token_id u32 = 102
llama_model_loader: - kv 17: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 18: tokenizer.ggml.cls_token_id u32 = 101
llama_model_loader: - kv 19: tokenizer.ggml.mask_token_id u32 = 103
llama_model_loader: - type f32: 243 tensors
llama_model_loader: - type f16: 146 tensors
llm_load_vocab: special tokens cache size = 5
llm_load_vocab: token to piece cache size = 0.2032 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = bert
llm_load_print_meta: vocab type = WPM
llm_load_print_meta: n_vocab = 30522
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 512
llm_load_print_meta: n_embd = 1024
llm_load_print_meta: n_layer = 24
llm_load_print_meta: n_head = 16
llm_load_print_meta: n_head_kv = 16
llm_load_print_meta: n_rot = 64
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 64
llm_load_print_meta: n_embd_head_v = 64
llm_load_print_meta: n_gqa = 1
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 1.0e-12
llm_load_print_meta: f_norm_rms_eps = 0.0e+00
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 4096
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 0
llm_load_print_meta: pooling type = 2
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 512
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 335M
llm_load_print_meta: model ftype = F16
llm_load_print_meta: model params = 334.09 M
llm_load_print_meta: model size = 637.85 MiB (16.02 BPW)
llm_load_print_meta: general.name = snowflake-arctic-embed-l
llm_load_print_meta: UNK token = 100 '[UNK]'
llm_load_print_meta: SEP token = 102 '[SEP]'
llm_load_print_meta: PAD token = 0 '[PAD]'
llm_load_print_meta: CLS token = 101 '[CLS]'
llm_load_print_meta: MASK token = 103 '[MASK]'
llm_load_print_meta: LF token = 0 '[PAD]'
llm_load_print_meta: max token length = 21
time=2024-11-11T19:45:15.274Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server loading model"
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: GRID P40-8A, compute capability 6.1, VMM: no
llm_load_tensors: ggml ctx size = 0.32 MiB
llm_load_tensors: offloading 24 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 25/25 layers to GPU
llm_load_tensors: CUDA_Host buffer size = 60.62 MiB
llm_load_tensors: CUDA0 buffer size = 577.23 MiB
llama_new_context_with_model: n_ctx = 2048
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA0 KV buffer size = 192.00 MiB
llama_new_context_with_model: KV self size = 192.00 MiB, K (f16): 96.00 MiB, V (f16): 96.00 MiB
llama_new_context_with_model: CPU output buffer size = 0.00 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 25.01 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 5.01 MiB
llama_new_context_with_model: graph nodes = 849
llama_new_context_with_model: graph splits = 2
time=2024-11-11T19:45:24.050Z level=INFO source=server.go:685 msg="llama runner started in 13.55 seconds"
llama_model_loader: loaded meta data with 20 key-value pairs and 389 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-fb3b66c7bdf6dabbb2edbc22627f4cb2df021c9e9545b54feafd8a7c09fe8ec5 (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = bert
llama_model_loader: - kv 1: general.name str = snowflake-arctic-embed-l
llama_model_loader: - kv 2: bert.block_count u32 = 24
llama_model_loader: - kv 3: bert.context_length u32 = 512
llama_model_loader: - kv 4: bert.embedding_length u32 = 1024
llama_model_loader: - kv 5: bert.feed_forward_length u32 = 4096
llama_model_loader: - kv 6: bert.attention.head_count u32 = 16
llama_model_loader: - kv 7: bert.attention.layer_norm_epsilon f32 = 0.000000
llama_model_loader: - kv 8: general.file_type u32 = 1
llama_model_loader: - kv 9: bert.attention.causal bool = false
llama_model_loader: - kv 10: bert.pooling_type u32 = 2
llama_model_loader: - kv 11: tokenizer.ggml.token_type_count u32 = 2
llama_model_loader: - kv 12: tokenizer.ggml.model str = bert
llama_model_loader: - kv 13: tokenizer.ggml.tokens arr[str,30522] = ["[PAD]", "[unused0]", "[unused1]", "...
llama_model_loader: - kv 14: tokenizer.ggml.token_type arr[i32,30522] = [3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 15: tokenizer.ggml.unknown_token_id u32 = 100
llama_model_loader: - kv 16: tokenizer.ggml.seperator_token_id u32 = 102
llama_model_loader: - kv 17: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 18: tokenizer.ggml.cls_token_id u32 = 101
llama_model_loader: - kv 19: tokenizer.ggml.mask_token_id u32 = 103
llama_model_loader: - type f32: 243 tensors
llama_model_loader: - type f16: 146 tensors
llm_load_vocab: special tokens cache size = 5
llm_load_vocab: token to piece cache size = 0.2032 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = bert
llm_load_print_meta: vocab type = WPM
llm_load_print_meta: n_vocab = 30522
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 1
llm_load_print_meta: model type = ?B
llm_load_print_meta: model ftype = all F32
llm_load_print_meta: model params = 334.09 M
llm_load_print_meta: model size = 637.85 MiB (16.02 BPW)
llm_load_print_meta: general.name = snowflake-arctic-embed-l
llm_load_print_meta: UNK token = 100 '[UNK]'
llm_load_print_meta: SEP token = 102 '[SEP]'
llm_load_print_meta: PAD token = 0 '[PAD]'
llm_load_print_meta: CLS token = 101 '[CLS]'
llm_load_print_meta: MASK token = 103 '[MASK]'
llm_load_print_meta: LF token = 0 '[PAD]'
llm_load_print_meta: max token length = 21
llama_model_load: vocab only - skipping tensors
[GIN] 2024/11/11 - 19:45:31 | 200 | 21.7616034s | 10.0.0.220 | POST "/api/embed"
time=2024-11-11T19:45:31.969Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=GPU-c707ca87-9ffc-11ef-acd4-9c4a84a45058 library=cuda total="8.0 GiB" available="5.5 GiB"
time=2024-11-11T19:45:31.969Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=0 library=oneapi total="3.9 GiB" available="3.7 GiB"
time=2024-11-11T19:45:32.774Z level=INFO source=server.go:106 msg="system memory" total="54.0 GiB" free="48.2 GiB" free_swap="63.7 GiB"
time=2024-11-11T19:45:32.776Z level=INFO source=memory.go:354 msg="offload to cuda" layers.requested=-1 layers.model=43 layers.offload=22 layers.split="" memory.available="[6.6 GiB]" memory.gpu_overhead="0 B" memory.required.full="11.7 GiB" memory.required.partial="6.5 GiB" memory.required.kv="672.0 MiB" memory.required.allocations="[6.5 GiB]" memory.weights.total="8.9 GiB" memory.weights.repeating="8.0 GiB" memory.weights.nonrepeating="929.7 MiB" memory.graph.full="507.0 MiB" memory.graph.partial="1.2 GiB"
time=2024-11-11T19:45:32.777Z level=INFO source=server.go:300 msg="Enabling flash attention"
time=2024-11-11T19:45:32.780Z level=INFO source=server.go:467 msg="starting llama server" cmd="C:\\Users\\VMZ\\AppData\\Local\\Programs\\Ollama\\lib\\ollama\\runners\\cuda_v12\\ollama_llama_server.exe --model C:\\Users\\VMZ\\.ollama\\models\\blobs\\sha256-820716d00fbd469307ccc1ca9f98eead1dba6f42a38f538e362e27190ffd14af --ctx-size 4096 --batch-size 512 --embedding --n-gpu-layers 22 --threads 16 --flash-attn --cache-type-k q8_0 --cache-type-v q8_0 --no-mmap --parallel 1 --port 53069"
time=2024-11-11T19:45:32.783Z level=INFO source=sched.go:449 msg="loaded runners" count=1
time=2024-11-11T19:45:32.783Z level=INFO source=server.go:646 msg="waiting for llama runner to start responding"
time=2024-11-11T19:45:32.783Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server error"
time=2024-11-11T19:45:32.936Z level=INFO source=runner.go:845 msg="starting go runner"
time=2024-11-11T19:45:32.937Z level=INFO source=runner.go:846 msg=system info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | cgo(gcc)" threads=16
time=2024-11-11T19:45:32.938Z level=INFO source=.:0 msg="Server listening on 127.0.0.1:53069"
llama_model_loader: loaded meta data with 40 key-value pairs and 464 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-820716d00fbd469307ccc1ca9f98eead1dba6f42a38f538e362e27190ffd14af (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 9b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 9B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 3584
llama_model_loader: - kv 11: gemma2.block_count u32 = 42
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 14336
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 16
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 8
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
time=2024-11-11T19:45:33.035Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server loading model"
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["", "", "", "", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% for message in mess...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-9b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 294
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 169 tensors
llama_model_loader: - type q8_0: 295 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 217
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 3584
llm_load_print_meta: n_layer = 42
llm_load_print_meta: n_head = 16
llm_load_print_meta: n_head_kv = 8
llm_load_print_meta: n_rot = 256
llm_load_print_meta: n_swa = 4096
llm_load_print_meta: n_embd_head_k = 256
llm_load_print_meta: n_embd_head_v = 256
llm_load_print_meta: n_gqa = 2
llm_load_print_meta: n_embd_k_gqa = 2048
llm_load_print_meta: n_embd_v_gqa = 2048
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 14336
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 9B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 9.24 B
llm_load_print_meta: model size = 9.15 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 9b It
llm_load_print_meta: BOS token = 2 ''
llm_load_print_meta: EOS token = 1 ''
llm_load_print_meta: UNK token = 3 ''
llm_load_print_meta: PAD token = 0 ''
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 ''
llm_load_print_meta: EOG token = 1 ''
llm_load_print_meta: EOG token = 107 ''
llm_load_print_meta: max token length = 48
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: GRID P40-8A, compute capability 6.1, VMM: no
llm_load_tensors: ggml ctx size = 0.41 MiB
llm_load_tensors: offloading 22 repeating layers to GPU
llm_load_tensors: offloaded 22/43 layers to GPU
llm_load_tensors: CUDA_Host buffer size = 5876.73 MiB
llm_load_tensors: CUDA0 buffer size = 4419.08 MiB
llama_new_context_with_model: n_ctx = 4096
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 1
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA_Host KV buffer size = 340.00 MiB
llama_kv_cache_init: CUDA0 KV buffer size = 374.00 MiB
llama_new_context_with_model: KV self size = 714.00 MiB, K (q8_0): 357.00 MiB, V (q8_0): 357.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.99 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 1436.69 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 45.01 MiB
llama_new_context_with_model: graph nodes = 1398
llama_new_context_with_model: graph splits = 292
time=2024-11-11T19:45:57.112Z level=INFO source=server.go:685 msg="llama runner started in 24.33 seconds"
llama_model_loader: loaded meta data with 40 key-value pairs and 464 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-820716d00fbd469307ccc1ca9f98eead1dba6f42a38f538e362e27190ffd14af (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 9b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 9B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 3584
llama_model_loader: - kv 11: gemma2.block_count u32 = 42
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 14336
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 16
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 8
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["", "", "", "", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% for message in mess...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-9b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 294
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 169 tensors
llama_model_loader: - type q8_0: 295 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 217
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 1
llm_load_print_meta: model type = ?B
llm_load_print_meta: model ftype = all F32
llm_load_print_meta: model params = 9.24 B
llm_load_print_meta: model size = 9.15 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 9b It
llm_load_print_meta: BOS token = 2 ''
llm_load_print_meta: EOS token = 1 ''
llm_load_print_meta: UNK token = 3 ''
llm_load_print_meta: PAD token = 0 ''
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 ''
llm_load_print_meta: EOG token = 1 ''
llm_load_print_meta: EOG token = 107 ''
llm_load_print_meta: max token length = 48
llama_model_load: vocab only - skipping tensors
[GIN] 2024/11/11 - 19:46:17 | 200 | 45.6313258s | 10.0.0.220 | POST "/api/chat"
time=2024-11-11T19:46:17.616Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=GPU-c707ca87-9ffc-11ef-acd4-9c4a84a45058 library=cuda total="8.0 GiB" available="318.3 MiB"
time=2024-11-11T19:46:17.616Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=0 library=oneapi total="3.9 GiB" available="3.7 GiB"
time=2024-11-11T19:46:21.266Z level=INFO source=sched.go:730 msg="new model will fit in available VRAM, loading" model=C:\Users\VMZ\.ollama\models\blobs\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 library=cuda parallel=4 required="4.4 GiB"
time=2024-11-11T19:46:21.404Z level=INFO source=server.go:106 msg="system memory" total="54.0 GiB" free="48.2 GiB" free_swap="63.7 GiB"
time=2024-11-11T19:46:21.405Z level=INFO source=memory.go:354 msg="offload to cuda" layers.requested=-1 layers.model=27 layers.offload=27 layers.split="" memory.available="[6.6 GiB]" memory.gpu_overhead="0 B" memory.required.full="4.4 GiB" memory.required.partial="4.4 GiB" memory.required.kv="832.0 MiB" memory.required.allocations="[4.4 GiB]" memory.weights.total="2.8 GiB" memory.weights.repeating="2.2 GiB" memory.weights.nonrepeating="597.7 MiB" memory.graph.full="504.5 MiB" memory.graph.partial="965.9 MiB"
time=2024-11-11T19:46:21.407Z level=INFO source=server.go:300 msg="Enabling flash attention"
time=2024-11-11T19:46:21.410Z level=INFO source=server.go:467 msg="starting llama server" cmd="C:\\Users\\VMZ\\AppData\\Local\\Programs\\Ollama\\lib\\ollama\\runners\\cuda_v12\\ollama_llama_server.exe --model C:\\Users\\VMZ\\.ollama\\models\\blobs\\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 --ctx-size 16384 --batch-size 512 --embedding --n-gpu-layers 27 --threads 16 --flash-attn --cache-type-k q8_0 --cache-type-v q8_0 --no-mmap --parallel 4 --port 53110"
time=2024-11-11T19:46:21.413Z level=INFO source=sched.go:449 msg="loaded runners" count=1
time=2024-11-11T19:46:21.413Z level=INFO source=server.go:646 msg="waiting for llama runner to start responding"
time=2024-11-11T19:46:21.414Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server error"
time=2024-11-11T19:46:21.570Z level=INFO source=runner.go:845 msg="starting go runner"
time=2024-11-11T19:46:21.570Z level=INFO source=runner.go:846 msg=system info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | cgo(gcc)" threads=16
time=2024-11-11T19:46:21.571Z level=INFO source=.:0 msg="Server listening on 127.0.0.1:53110"
llama_model_loader: loaded meta data with 40 key-value pairs and 288 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 2b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 2B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 2304
llama_model_loader: - kv 11: gemma2.block_count u32 = 26
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 9216
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 8
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 4
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
time=2024-11-11T19:46:21.666Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server loading model"
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["", "", "", "", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% if messages[0]['rol...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-2b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 182
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 105 tensors
llama_model_loader: - type q8_0: 183 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 249
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 2304
llm_load_print_meta: n_layer = 26
llm_load_print_meta: n_head = 8
llm_load_print_meta: n_head_kv = 4
llm_load_print_meta: n_rot = 256
llm_load_print_meta: n_swa = 4096
llm_load_print_meta: n_embd_head_k = 256
llm_load_print_meta: n_embd_head_v = 256
llm_load_print_meta: n_gqa = 2
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 9216
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 2B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 2.61 B
llm_load_print_meta: model size = 2.59 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 2b It
llm_load_print_meta: BOS token = 2 ''
llm_load_print_meta: EOS token = 1 ''
llm_load_print_meta: UNK token = 3 ''
llm_load_print_meta: PAD token = 0 ''
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 ''
llm_load_print_meta: EOG token = 1 ''
llm_load_print_meta: EOG token = 107 ''
llm_load_print_meta: max token length = 48
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: GRID P40-8A, compute capability 6.1, VMM: no
llm_load_tensors: ggml ctx size = 0.26 MiB
llm_load_tensors: offloading 26 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 27/27 layers to GPU
llm_load_tensors: CUDA_Host buffer size = 597.66 MiB
llm_load_tensors: CUDA0 buffer size = 2649.78 MiB
llama_new_context_with_model: n_ctx = 16384
llama_new_context_with_model: n_batch = 2048
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 1
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA0 KV buffer size = 884.00 MiB
llama_new_context_with_model: KV self size = 884.00 MiB, K (q8_0): 442.00 MiB, V (q8_0): 442.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 3.94 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 504.50 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 97.01 MiB
llama_new_context_with_model: graph nodes = 870
llama_new_context_with_model: graph splits = 54
time=2024-11-11T19:46:29.691Z level=INFO source=server.go:685 msg="llama runner started in 8.28 seconds"
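Side note for anyone auditing the VRAM numbers: the "KV self size = 884.00 MiB" reported above follows directly from the hyperparameters printed earlier in the load. A minimal sketch of the arithmetic (my own check, not Ollama code; the only assumption is q8_0's packing of 32 elements into 34 bytes):

```python
# Reproduce the "KV self size" lines from the printed model hyperparameters.
# q8_0 stores 32 elements in 34 bytes -> 1.0625 bytes per element (assumption).
def kv_cache_mib(n_ctx, n_layer, n_embd_gqa, bytes_per_elt=34 / 32):
    one_side = n_ctx * n_layer * n_embd_gqa * bytes_per_elt  # K (or V) in bytes
    return 2 * one_side / 1024**2                            # K + V, in MiB

print(kv_cache_mib(16384, 26, 1024))  # 884.0 -> the gemma2 2b load above
print(kv_cache_mib(8192, 42, 2048))   # 1428.0 -> the gemma2 9b load further down
```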
llama_model_loader: loaded meta data with 40 key-value pairs and 288 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 2b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 2B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 2304
llama_model_loader: - kv 11: gemma2.block_count u32 = 26
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 9216
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 8
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 4
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% if messages[0]['rol...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-2b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 182
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 105 tensors
llama_model_loader: - type q8_0: 183 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 249
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 1
llm_load_print_meta: model type = ?B
llm_load_print_meta: model ftype = all F32
llm_load_print_meta: model params = 2.61 B
llm_load_print_meta: model size = 2.59 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 2b It
llm_load_print_meta: BOS token = 2 '<bos>'
llm_load_print_meta: EOS token = 1 '<eos>'
llm_load_print_meta: UNK token = 3 '<unk>'
llm_load_print_meta: PAD token = 0 '<pad>'
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 '<end_of_turn>'
llm_load_print_meta: EOG token = 1 '<eos>'
llm_load_print_meta: EOG token = 107 '<end_of_turn>'
llm_load_print_meta: max token length = 48
llama_model_load: vocab only - skipping tensors
[GIN] 2024/11/11 - 19:46:56 | 200 | 39.7349276s | 10.0.0.220 | POST "/api/chat"
time=2024-11-11T19:46:57.114Z level=WARN source=types.go:509 msg="invalid option provided" option=stream_response
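One detail worth flagging in the warning above: stream_response is not a recognized request option, which is why types.go rejects it. In the documented API the streaming switch is the top-level stream field, not an entry under options; a corrected request body would look roughly like this (model tag and message are placeholders):

```python
# Hypothetical corrected payload; streaming is a top-level boolean in /api/chat.
valid_request = {
    "model": "gemma2:2b",                                # placeholder tag
    "messages": [{"role": "user", "content": "hello"}],
    "stream": True,                                      # not options["stream_response"]
}
```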
time=2024-11-11T19:46:57.387Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=GPU-c707ca87-9ffc-11ef-acd4-9c4a84a45058 library=cuda total="8.0 GiB" available="2.5 GiB"
time=2024-11-11T19:46:57.387Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=0 library=oneapi total="3.9 GiB" available="3.7 GiB"
time=2024-11-11T19:46:58.387Z level=INFO source=server.go:106 msg="system memory" total="54.0 GiB" free="48.2 GiB" free_swap="63.7 GiB"
time=2024-11-11T19:46:58.389Z level=INFO source=memory.go:354 msg="offload to cuda" layers.requested=33 layers.model=43 layers.offload=20 layers.split="" memory.available="[6.6 GiB]" memory.gpu_overhead="0 B" memory.required.full="12.3 GiB" memory.required.partial="6.4 GiB" memory.required.kv="1.3 GiB" memory.required.allocations="[6.4 GiB]" memory.weights.total="9.6 GiB" memory.weights.repeating="8.6 GiB" memory.weights.nonrepeating="929.7 MiB" memory.graph.full="507.0 MiB" memory.graph.partial="1.2 GiB"
time=2024-11-11T19:46:58.390Z level=INFO source=server.go:300 msg="Enabling flash attention"
time=2024-11-11T19:46:58.393Z level=INFO source=server.go:467 msg="starting llama server" cmd="C:\\Users\\VMZ\\AppData\\Local\\Programs\\Ollama\\lib\\ollama\\runners\\cuda_v12\\ollama_llama_server.exe --model C:\\Users\\VMZ\\.ollama\\models\\blobs\\sha256-820716d00fbd469307ccc1ca9f98eead1dba6f42a38f538e362e27190ffd14af --ctx-size 8192 --batch-size 512 --embedding --n-gpu-layers 33 --threads 16 --flash-attn --cache-type-k q8_0 --cache-type-v q8_0 --no-mmap --parallel 1 --port 53133"
time=2024-11-11T19:46:58.395Z level=INFO source=sched.go:449 msg="loaded runners" count=1
time=2024-11-11T19:46:58.396Z level=INFO source=server.go:646 msg="waiting for llama runner to start responding"
time=2024-11-11T19:46:58.396Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server error"
time=2024-11-11T19:46:58.536Z level=INFO source=runner.go:845 msg="starting go runner"
time=2024-11-11T19:46:58.536Z level=INFO source=runner.go:846 msg=system info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | cgo(gcc)" threads=16
time=2024-11-11T19:46:58.538Z level=INFO source=.:0 msg="Server listening on 127.0.0.1:53133"
llama_model_loader: loaded meta data with 40 key-value pairs and 464 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-820716d00fbd469307ccc1ca9f98eead1dba6f42a38f538e362e27190ffd14af (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 9b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 9B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 3584
llama_model_loader: - kv 11: gemma2.block_count u32 = 42
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 14336
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 16
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 8
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
time=2024-11-11T19:46:58.648Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server loading model"
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% for message in mess...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-9b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 294
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 169 tensors
llama_model_loader: - type q8_0: 295 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 217
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 3584
llm_load_print_meta: n_layer = 42
llm_load_print_meta: n_head = 16
llm_load_print_meta: n_head_kv = 8
llm_load_print_meta: n_rot = 256
llm_load_print_meta: n_swa = 4096
llm_load_print_meta: n_embd_head_k = 256
llm_load_print_meta: n_embd_head_v = 256
llm_load_print_meta: n_gqa = 2
llm_load_print_meta: n_embd_k_gqa = 2048
llm_load_print_meta: n_embd_v_gqa = 2048
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 14336
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 9B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 9.24 B
llm_load_print_meta: model size = 9.15 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 9b It
llm_load_print_meta: BOS token = 2 '<bos>'
llm_load_print_meta: EOS token = 1 '<eos>'
llm_load_print_meta: UNK token = 3 '<unk>'
llm_load_print_meta: PAD token = 0 '<pad>'
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 '<end_of_turn>'
llm_load_print_meta: EOG token = 1 '<eos>'
llm_load_print_meta: EOG token = 107 '<end_of_turn>'
llm_load_print_meta: max token length = 48
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: GRID P40-8A, compute capability 6.1, VMM: no
llm_load_tensors: ggml ctx size = 0.41 MiB
llm_load_tensors: offloading 33 repeating layers to GPU
llm_load_tensors: offloaded 33/43 layers to GPU
llm_load_tensors: CUDA_Host buffer size = 3667.19 MiB
llm_load_tensors: CUDA0 buffer size = 6628.62 MiB
llama_new_context_with_model: n_ctx = 8192
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 1
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA_Host KV buffer size = 306.00 MiB
llama_kv_cache_init: CUDA0 KV buffer size = 1122.00 MiB
llama_new_context_with_model: KV self size = 1428.00 MiB, K (q8_0): 714.00 MiB, V (q8_0): 714.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.99 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 1436.69 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 66.01 MiB
llama_new_context_with_model: graph nodes = 1398
llama_new_context_with_model: graph splits = 182
time=2024-11-11T19:47:06.172Z level=INFO source=server.go:685 msg="llama runner started in 7.78 seconds"
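The 9b load above reports layers.requested=33 and starts the runner with --n-gpu-layers 33, which is the pattern you get when a client pins the layer count through the standard num_gpu option (the actual client payload is not shown in this issue, so treat this as an inferred reconstruction):

```python
# Sketch of a request that would produce layers.requested=33 (inferred, see above).
import json, urllib.request

payload = {
    "model": "gemma2:9b",          # placeholder; the log only shows the blob sha256
    "messages": [{"role": "user", "content": "hello"}],
    "options": {"num_gpu": 33},    # -> runner is launched with --n-gpu-layers 33
    "stream": False,
}
req = urllib.request.Request(
    "http://localhost:11434/api/chat",
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
print(urllib.request.urlopen(req).read().decode())
```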
llama_model_loader: loaded meta data with 40 key-value pairs and 464 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-820716d00fbd469307ccc1ca9f98eead1dba6f42a38f538e362e27190ffd14af (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 9b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 9B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 3584
llama_model_loader: - kv 11: gemma2.block_count u32 = 42
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 14336
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 16
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 8
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% for message in mess...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-9b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 294
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 169 tensors
llama_model_loader: - type q8_0: 295 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 217
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 1
llm_load_print_meta: model type = ?B
llm_load_print_meta: model ftype = all F32
llm_load_print_meta: model params = 9.24 B
llm_load_print_meta: model size = 9.15 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 9b It
llm_load_print_meta: BOS token = 2 '<bos>'
llm_load_print_meta: EOS token = 1 '<eos>'
llm_load_print_meta: UNK token = 3 '<unk>'
llm_load_print_meta: PAD token = 0 '<pad>'
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 '<end_of_turn>'
llm_load_print_meta: EOG token = 1 '<eos>'
llm_load_print_meta: EOG token = 107 '<end_of_turn>'
llm_load_print_meta: max token length = 48
llama_model_load: vocab only - skipping tensors
[GIN] 2024/11/11 - 19:48:32 | 200 | 1m35s | 10.0.0.220 | POST "/api/chat"
time=2024-11-11T19:48:33.677Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=GPU-c707ca87-9ffc-11ef-acd4-9c4a84a45058 library=cuda total="8.0 GiB" available="1.6 GiB"
time=2024-11-11T19:48:33.678Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=0 library=oneapi total="3.9 GiB" available="3.7 GiB"
[GIN] 2024/11/11 - 19:48:37 | 200 | 0s | 127.0.0.1 | HEAD "/"
[GIN] 2024/11/11 - 19:48:37 | 200 | 552.2µs | 127.0.0.1 | GET "/api/ps"
time=2024-11-11T19:48:37.563Z level=INFO source=sched.go:730 msg="new model will fit in available VRAM, loading" model=C:\Users\VMZ\.ollama\models\blobs\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 library=cuda parallel=4 required="4.4 GiB"
time=2024-11-11T19:48:37.651Z level=INFO source=server.go:106 msg="system memory" total="54.0 GiB" free="47.8 GiB" free_swap="63.4 GiB"
time=2024-11-11T19:48:37.652Z level=INFO source=memory.go:354 msg="offload to cuda" layers.requested=-1 layers.model=27 layers.offload=27 layers.split="" memory.available="[6.6 GiB]" memory.gpu_overhead="0 B" memory.required.full="4.4 GiB" memory.required.partial="4.4 GiB" memory.required.kv="832.0 MiB" memory.required.allocations="[4.4 GiB]" memory.weights.total="2.8 GiB" memory.weights.repeating="2.2 GiB" memory.weights.nonrepeating="597.7 MiB" memory.graph.full="504.5 MiB" memory.graph.partial="965.9 MiB"
time=2024-11-11T19:48:37.655Z level=INFO source=server.go:300 msg="Enabling flash attention"
time=2024-11-11T19:48:37.658Z level=INFO source=server.go:467 msg="starting llama server" cmd="C:\\Users\\VMZ\\AppData\\Local\\Programs\\Ollama\\lib\\ollama\\runners\\cuda_v12\\ollama_llama_server.exe --model C:\\Users\\VMZ\\.ollama\\models\\blobs\\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 --ctx-size 16384 --batch-size 512 --embedding --n-gpu-layers 27 --threads 16 --flash-attn --cache-type-k q8_0 --cache-type-v q8_0 --no-mmap --parallel 4 --port 53197"
time=2024-11-11T19:48:37.660Z level=INFO source=sched.go:449 msg="loaded runners" count=1
time=2024-11-11T19:48:37.660Z level=INFO source=server.go:646 msg="waiting for llama runner to start responding"
time=2024-11-11T19:48:37.660Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server error"
time=2024-11-11T19:48:37.857Z level=INFO source=runner.go:845 msg="starting go runner"
time=2024-11-11T19:48:37.858Z level=INFO source=runner.go:846 msg=system info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | cgo(gcc)" threads=16
time=2024-11-11T19:48:37.859Z level=INFO source=.:0 msg="Server listening on 127.0.0.1:53197"
time=2024-11-11T19:48:37.912Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server loading model"
llama_model_loader: loaded meta data with 40 key-value pairs and 288 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 2b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 2B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 2304
llama_model_loader: - kv 11: gemma2.block_count u32 = 26
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 9216
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 8
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 4
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% if messages[0]['rol...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-2b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 182
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 105 tensors
llama_model_loader: - type q8_0: 183 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 249
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 2304
llm_load_print_meta: n_layer = 26
llm_load_print_meta: n_head = 8
llm_load_print_meta: n_head_kv = 4
llm_load_print_meta: n_rot = 256
llm_load_print_meta: n_swa = 4096
llm_load_print_meta: n_embd_head_k = 256
llm_load_print_meta: n_embd_head_v = 256
llm_load_print_meta: n_gqa = 2
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 9216
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 2B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 2.61 B
llm_load_print_meta: model size = 2.59 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 2b It
llm_load_print_meta: BOS token = 2 '<bos>'
llm_load_print_meta: EOS token = 1 '<eos>'
llm_load_print_meta: UNK token = 3 '<unk>'
llm_load_print_meta: PAD token = 0 '<pad>'
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 '<end_of_turn>'
llm_load_print_meta: EOG token = 1 '<eos>'
llm_load_print_meta: EOG token = 107 '<end_of_turn>'
llm_load_print_meta: max token length = 48
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: GRID P40-8A, compute capability 6.1, VMM: no
llm_load_tensors: ggml ctx size = 0.26 MiB
llm_load_tensors: offloading 26 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 27/27 layers to GPU
llm_load_tensors: CUDA_Host buffer size = 597.66 MiB
llm_load_tensors: CUDA0 buffer size = 2649.78 MiB
llama_new_context_with_model: n_ctx = 16384
llama_new_context_with_model: n_batch = 2048
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 1
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA0 KV buffer size = 884.00 MiB
llama_new_context_with_model: KV self size = 884.00 MiB, K (q8_0): 442.00 MiB, V (q8_0): 442.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 3.94 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 504.50 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 97.01 MiB
llama_new_context_with_model: graph nodes = 870
llama_new_context_with_model: graph splits = 54
time=2024-11-11T19:48:40.670Z level=INFO source=server.go:685 msg="llama runner started in 3.01 seconds"
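Also note the flags on this reload: --ctx-size 16384 --parallel 4. The server sizes the runner's context as the per-request context multiplied by the number of parallel slots, so these flags are consistent with a 4096-token num_ctx (the request itself is not shown; the 4096 figure is an inference):

```python
# ctx-size handed to the runner = num_ctx * parallel slots.
num_ctx, num_parallel = 4096, 4   # num_ctx assumed; slot count taken from the log
assert num_ctx * num_parallel == 16384  # matches "--ctx-size 16384" above
```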
[GIN] 2024/11/11 - 19:48:42 | 200 | 9.2365918s | 10.0.0.220 | POST "/api/chat"
time=2024-11-11T19:48:45.841Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=GPU-c707ca87-9ffc-11ef-acd4-9c4a84a45058 library=cuda total="8.0 GiB" available="2.5 GiB"
time=2024-11-11T19:48:45.841Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=0 library=oneapi total="3.9 GiB" available="3.7 GiB"
time=2024-11-11T19:48:45.842Z level=INFO source=sched.go:730 msg="new model will fit in available VRAM, loading" model=C:\Users\VMZ\.ollama\models\blobs\sha256-fb3b66c7bdf6dabbb2edbc22627f4cb2df021c9e9545b54feafd8a7c09fe8ec5 library=cuda parallel=1 required="1.1 GiB"
time=2024-11-11T19:48:45.987Z level=INFO source=server.go:106 msg="system memory" total="54.0 GiB" free="46.7 GiB" free_swap="58.1 GiB"
time=2024-11-11T19:48:45.987Z level=INFO source=memory.go:354 msg="offload to cuda" layers.requested=-1 layers.model=25 layers.offload=25 layers.split="" memory.available="[2.5 GiB]" memory.gpu_overhead="0 B" memory.required.full="1.1 GiB" memory.required.partial="1.1 GiB" memory.required.kv="12.0 MiB" memory.required.allocations="[1.1 GiB]" memory.weights.total="589.2 MiB" memory.weights.repeating="529.6 MiB" memory.weights.nonrepeating="59.6 MiB" memory.graph.full="32.0 MiB" memory.graph.partial="32.0 MiB"
time=2024-11-11T19:48:45.994Z level=INFO source=server.go:305 msg="Flash attention not enabled"
time=2024-11-11T19:48:45.997Z level=INFO source=server.go:467 msg="starting llama server" cmd="C:\\Users\\VMZ\\AppData\\Local\\Programs\\Ollama\\lib\\ollama\\runners\\cuda_v12\\ollama_llama_server.exe --model C:\\Users\\VMZ\\.ollama\\models\\blobs\\sha256-fb3b66c7bdf6dabbb2edbc22627f4cb2df021c9e9545b54feafd8a7c09fe8ec5 --ctx-size 2048 --batch-size 512 --embedding --n-gpu-layers 25 --threads 16 --no-mmap --parallel 1 --port 53200"
time=2024-11-11T19:48:46.005Z level=INFO source=sched.go:449 msg="loaded runners" count=2
time=2024-11-11T19:48:46.005Z level=INFO source=server.go:646 msg="waiting for llama runner to start responding"
time=2024-11-11T19:48:46.006Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server error"
time=2024-11-11T19:48:46.280Z level=INFO source=runner.go:845 msg="starting go runner"
time=2024-11-11T19:48:46.280Z level=INFO source=runner.go:846 msg=system info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | cgo(gcc)" threads=16
time=2024-11-11T19:48:46.281Z level=INFO source=.:0 msg="Server listening on 127.0.0.1:53200"
llama_model_loader: loaded meta data with 20 key-value pairs and 389 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-fb3b66c7bdf6dabbb2edbc22627f4cb2df021c9e9545b54feafd8a7c09fe8ec5 (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = bert
llama_model_loader: - kv 1: general.name str = snowflake-arctic-embed-l
llama_model_loader: - kv 2: bert.block_count u32 = 24
llama_model_loader: - kv 3: bert.context_length u32 = 512
llama_model_loader: - kv 4: bert.embedding_length u32 = 1024
llama_model_loader: - kv 5: bert.feed_forward_length u32 = 4096
llama_model_loader: - kv 6: bert.attention.head_count u32 = 16
llama_model_loader: - kv 7: bert.attention.layer_norm_epsilon f32 = 0.000000
llama_model_loader: - kv 8: general.file_type u32 = 1
llama_model_loader: - kv 9: bert.attention.causal bool = false
llama_model_loader: - kv 10: bert.pooling_type u32 = 2
llama_model_loader: - kv 11: tokenizer.ggml.token_type_count u32 = 2
llama_model_loader: - kv 12: tokenizer.ggml.model str = bert
llama_model_loader: - kv 13: tokenizer.ggml.tokens arr[str,30522] = ["[PAD]", "[unused0]", "[unused1]", "...
llama_model_loader: - kv 14: tokenizer.ggml.token_type arr[i32,30522] = [3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 15: tokenizer.ggml.unknown_token_id u32 = 100
llama_model_loader: - kv 16: tokenizer.ggml.seperator_token_id u32 = 102
llama_model_loader: - kv 17: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 18: tokenizer.ggml.cls_token_id u32 = 101
llama_model_loader: - kv 19: tokenizer.ggml.mask_token_id u32 = 103
llama_model_loader: - type f32: 243 tensors
llama_model_loader: - type f16: 146 tensors
llm_load_vocab: special tokens cache size = 5
llm_load_vocab: token to piece cache size = 0.2032 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = bert
llm_load_print_meta: vocab type = WPM
llm_load_print_meta: n_vocab = 30522
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 512
llm_load_print_meta: n_embd = 1024
llm_load_print_meta: n_layer = 24
llm_load_print_meta: n_head = 16
llm_load_print_meta: n_head_kv = 16
llm_load_print_meta: n_rot = 64
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 64
llm_load_print_meta: n_embd_head_v = 64
llm_load_print_meta: n_gqa = 1
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 1.0e-12
llm_load_print_meta: f_norm_rms_eps = 0.0e+00
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 4096
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 0
llm_load_print_meta: pooling type = 2
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 512
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 335M
llm_load_print_meta: model ftype = F16
llm_load_print_meta: model params = 334.09 M
llm_load_print_meta: model size = 637.85 MiB (16.02 BPW)
llm_load_print_meta: general.name = snowflake-arctic-embed-l
llm_load_print_meta: UNK token = 100 '[UNK]'
llm_load_print_meta: SEP token = 102 '[SEP]'
llm_load_print_meta: PAD token = 0 '[PAD]'
llm_load_print_meta: CLS token = 101 '[CLS]'
llm_load_print_meta: MASK token = 103 '[MASK]'
llm_load_print_meta: LF token = 0 '[PAD]'
llm_load_print_meta: max token length = 21
time=2024-11-11T19:48:46.510Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server loading model"
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: GRID P40-8A, compute capability 6.1, VMM: no
llm_load_tensors: ggml ctx size = 0.32 MiB
llm_load_tensors: offloading 24 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 25/25 layers to GPU
llm_load_tensors: CUDA_Host buffer size = 60.62 MiB
llm_load_tensors: CUDA0 buffer size = 577.23 MiB
llama_new_context_with_model: n_ctx = 2048
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA0 KV buffer size = 192.00 MiB
llama_new_context_with_model: KV self size = 192.00 MiB, K (f16): 96.00 MiB, V (f16): 96.00 MiB
llama_new_context_with_model: CPU output buffer size = 0.00 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 25.01 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 5.01 MiB
llama_new_context_with_model: graph nodes = 849
llama_new_context_with_model: graph splits = 2
time=2024-11-11T19:48:47.765Z level=INFO source=server.go:685 msg="llama runner started in 1.76 seconds"
llama_model_loader: loaded meta data with 20 key-value pairs and 389 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-fb3b66c7bdf6dabbb2edbc22627f4cb2df021c9e9545b54feafd8a7c09fe8ec5 (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = bert
llama_model_loader: - kv 1: general.name str = snowflake-arctic-embed-l
llama_model_loader: - kv 2: bert.block_count u32 = 24
llama_model_loader: - kv 3: bert.context_length u32 = 512
llama_model_loader: - kv 4: bert.embedding_length u32 = 1024
llama_model_loader: - kv 5: bert.feed_forward_length u32 = 4096
llama_model_loader: - kv 6: bert.attention.head_count u32 = 16
llama_model_loader: - kv 7: bert.attention.layer_norm_epsilon f32 = 0.000000
llama_model_loader: - kv 8: general.file_type u32 = 1
llama_model_loader: - kv 9: bert.attention.causal bool = false
llama_model_loader: - kv 10: bert.pooling_type u32 = 2
llama_model_loader: - kv 11: tokenizer.ggml.token_type_count u32 = 2
llama_model_loader: - kv 12: tokenizer.ggml.model str = bert
llama_model_loader: - kv 13: tokenizer.ggml.tokens arr[str,30522] = ["[PAD]", "[unused0]", "[unused1]", "...
llama_model_loader: - kv 14: tokenizer.ggml.token_type arr[i32,30522] = [3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 15: tokenizer.ggml.unknown_token_id u32 = 100
llama_model_loader: - kv 16: tokenizer.ggml.seperator_token_id u32 = 102
llama_model_loader: - kv 17: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 18: tokenizer.ggml.cls_token_id u32 = 101
llama_model_loader: - kv 19: tokenizer.ggml.mask_token_id u32 = 103
llama_model_loader: - type f32: 243 tensors
llama_model_loader: - type f16: 146 tensors
llm_load_vocab: special tokens cache size = 5
llm_load_vocab: token to piece cache size = 0.2032 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = bert
llm_load_print_meta: vocab type = WPM
llm_load_print_meta: n_vocab = 30522
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 1
llm_load_print_meta: model type = ?B
llm_load_print_meta: model ftype = all F32
llm_load_print_meta: model params = 334.09 M
llm_load_print_meta: model size = 637.85 MiB (16.02 BPW)
llm_load_print_meta: general.name = snowflake-arctic-embed-l
llm_load_print_meta: UNK token = 100 '[UNK]'
llm_load_print_meta: SEP token = 102 '[SEP]'
llm_load_print_meta: PAD token = 0 '[PAD]'
llm_load_print_meta: CLS token = 101 '[CLS]'
llm_load_print_meta: MASK token = 103 '[MASK]'
llm_load_print_meta: LF token = 0 '[PAD]'
llm_load_print_meta: max token length = 21
llama_model_load: vocab only - skipping tensors
[GIN] 2024/11/11 - 19:48:49 | 200 | 3.5420915s | 10.0.0.220 | POST "/api/embed"
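For completeness, the /api/embed call logged above targets the snowflake-arctic-embed-l model that was just loaded; a minimal request of that shape looks like this (the model tag is an assumption — the log only identifies the model by its blob sha256):

```python
# Minimal sketch of the embedding request behind the POST "/api/embed" line.
import json, urllib.request

req = urllib.request.Request(
    "http://localhost:11434/api/embed",
    data=json.dumps({
        "model": "snowflake-arctic-embed-l",   # assumed tag
        "input": "example text to embed",
    }).encode(),
    headers={"Content-Type": "application/json"},
)
resp = json.loads(urllib.request.urlopen(req).read())
print(len(resp["embeddings"][0]))  # 1024, matching n_embd in the load above
```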
time=2024-11-11T19:48:49.772Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=GPU-c707ca87-9ffc-11ef-acd4-9c4a84a45058 library=cuda total="8.0 GiB" available="1.4 GiB"
time=2024-11-11T19:48:49.773Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=0 library=oneapi total="3.9 GiB" available="3.7 GiB"
time=2024-11-11T19:48:50.559Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=GPU-c707ca87-9ffc-11ef-acd4-9c4a84a45058 library=cuda total="8.0 GiB" available="2.5 GiB"
time=2024-11-11T19:48:50.559Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=0 library=oneapi total="3.9 GiB" available="3.7 GiB"
[GIN] 2024/11/11 - 19:48:51 | 200 | 8.673905s | 10.0.0.220 | POST "/api/chat"
time=2024-11-11T19:48:52.715Z level=INFO source=server.go:106 msg="system memory" total="54.0 GiB" free="47.8 GiB" free_swap="63.4 GiB"
time=2024-11-11T19:48:52.716Z level=INFO source=memory.go:354 msg="offload to cuda" layers.requested=-1 layers.model=43 layers.offload=22 layers.split="" memory.available="[6.6 GiB]" memory.gpu_overhead="0 B" memory.required.full="11.7 GiB" memory.required.partial="6.5 GiB" memory.required.kv="672.0 MiB" memory.required.allocations="[6.5 GiB]" memory.weights.total="8.9 GiB" memory.weights.repeating="8.0 GiB" memory.weights.nonrepeating="929.7 MiB" memory.graph.full="507.0 MiB" memory.graph.partial="1.2 GiB"
time=2024-11-11T19:48:52.718Z level=INFO source=server.go:300 msg="Enabling flash attention"
time=2024-11-11T19:48:52.721Z level=INFO source=server.go:467 msg="starting llama server" cmd="C:\\Users\\VMZ\\AppData\\Local\\Programs\\Ollama\\lib\\ollama\\runners\\cuda_v12\\ollama_llama_server.exe --model C:\\Users\\VMZ\\.ollama\\models\\blobs\\sha256-820716d00fbd469307ccc1ca9f98eead1dba6f42a38f538e362e27190ffd14af --ctx-size 4096 --batch-size 512 --embedding --n-gpu-layers 22 --threads 16 --flash-attn --cache-type-k q8_0 --cache-type-v q8_0 --no-mmap --parallel 1 --port 53205"
time=2024-11-11T19:48:52.724Z level=INFO source=sched.go:449 msg="loaded runners" count=1
time=2024-11-11T19:48:52.724Z level=INFO source=server.go:646 msg="waiting for llama runner to start responding"
time=2024-11-11T19:48:52.725Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server error"
time=2024-11-11T19:48:52.865Z level=INFO source=runner.go:845 msg="starting go runner"
time=2024-11-11T19:48:52.865Z level=INFO source=runner.go:846 msg=system info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | cgo(gcc)" threads=16
time=2024-11-11T19:48:52.867Z level=INFO source=.:0 msg="Server listening on 127.0.0.1:53205"
llama_model_loader: loaded meta data with 40 key-value pairs and 464 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-820716d00fbd469307ccc1ca9f98eead1dba6f42a38f538e362e27190ffd14af (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 9b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 9B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 3584
llama_model_loader: - kv 11: gemma2.block_count u32 = 42
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 14336
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 16
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 8
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
time=2024-11-11T19:48:52.976Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server loading model"
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% for message in mess...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-9b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 294
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 169 tensors
llama_model_loader: - type q8_0: 295 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 217
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 3584
llm_load_print_meta: n_layer = 42
llm_load_print_meta: n_head = 16
llm_load_print_meta: n_head_kv = 8
llm_load_print_meta: n_rot = 256
llm_load_print_meta: n_swa = 4096
llm_load_print_meta: n_embd_head_k = 256
llm_load_print_meta: n_embd_head_v = 256
llm_load_print_meta: n_gqa = 2
llm_load_print_meta: n_embd_k_gqa = 2048
llm_load_print_meta: n_embd_v_gqa = 2048
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 14336
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 9B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 9.24 B
llm_load_print_meta: model size = 9.15 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 9b It
llm_load_print_meta: BOS token = 2 '<bos>'
llm_load_print_meta: EOS token = 1 '<eos>'
llm_load_print_meta: UNK token = 3 '<unk>'
llm_load_print_meta: PAD token = 0 '<pad>'
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 '<end_of_turn>'
llm_load_print_meta: EOG token = 1 '<eos>'
llm_load_print_meta: EOG token = 107 '<end_of_turn>'
llm_load_print_meta: max token length = 48
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: GRID P40-8A, compute capability 6.1, VMM: no
llm_load_tensors: ggml ctx size = 0.41 MiB
llm_load_tensors: offloading 22 repeating layers to GPU
llm_load_tensors: offloaded 22/43 layers to GPU
llm_load_tensors: CUDA_Host buffer size = 5876.73 MiB
llm_load_tensors: CUDA0 buffer size = 4419.08 MiB
llama_new_context_with_model: n_ctx = 4096
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 1
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA_Host KV buffer size = 340.00 MiB
llama_kv_cache_init: CUDA0 KV buffer size = 374.00 MiB
llama_new_context_with_model: KV self size = 714.00 MiB, K (q8_0): 357.00 MiB, V (q8_0): 357.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.99 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 1436.69 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 45.01 MiB
llama_new_context_with_model: graph nodes = 1398
llama_new_context_with_model: graph splits = 292
time=2024-11-11T19:48:59.248Z level=INFO source=server.go:685 msg="llama runner started in 6.52 seconds"
llama_model_loader: loaded meta data with 40 key-value pairs and 464 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-820716d00fbd469307ccc1ca9f98eead1dba6f42a38f538e362e27190ffd14af (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 9b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 9B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 3584
llama_model_loader: - kv 11: gemma2.block_count u32 = 42
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 14336
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 16
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 8
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% for message in mess...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-9b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 294
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 169 tensors
llama_model_loader: - type q8_0: 295 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 217
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 1
llm_load_print_meta: model type = ?B
llm_load_print_meta: model ftype = all F32
llm_load_print_meta: model params = 9.24 B
llm_load_print_meta: model size = 9.15 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 9b It
llm_load_print_meta: BOS token = 2 '<bos>'
llm_load_print_meta: EOS token = 1 '<eos>'
llm_load_print_meta: UNK token = 3 '<unk>'
llm_load_print_meta: PAD token = 0 '<pad>'
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 '<end_of_turn>'
llm_load_print_meta: EOG token = 1 '<eos>'
llm_load_print_meta: EOG token = 107 '<end_of_turn>'
llm_load_print_meta: max token length = 48
llama_model_load: vocab only - skipping tensors
[GIN] 2024/11/11 - 19:49:20 | 200 | 30.8085176s | 10.0.0.220 | POST "/api/chat"
time=2024-11-11T19:49:21.136Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=GPU-c707ca87-9ffc-11ef-acd4-9c4a84a45058 library=cuda total="8.0 GiB" available="314.7 MiB"
time=2024-11-11T19:49:21.137Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=0 library=oneapi total="3.9 GiB" available="3.7 GiB"
time=2024-11-11T19:49:24.322Z level=INFO source=sched.go:730 msg="new model will fit in available VRAM, loading" model=C:\Users\VMZ\.ollama\models\blobs\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 library=cuda parallel=4 required="4.4 GiB"
time=2024-11-11T19:49:24.415Z level=INFO source=server.go:106 msg="system memory" total="54.0 GiB" free="47.8 GiB" free_swap="63.4 GiB"
time=2024-11-11T19:49:24.416Z level=INFO source=memory.go:354 msg="offload to cuda" layers.requested=-1 layers.model=27 layers.offload=27 layers.split="" memory.available="[6.6 GiB]" memory.gpu_overhead="0 B" memory.required.full="4.4 GiB" memory.required.partial="4.4 GiB" memory.required.kv="832.0 MiB" memory.required.allocations="[4.4 GiB]" memory.weights.total="2.8 GiB" memory.weights.repeating="2.2 GiB" memory.weights.nonrepeating="597.7 MiB" memory.graph.full="504.5 MiB" memory.graph.partial="965.9 MiB"
time=2024-11-11T19:49:24.418Z level=INFO source=server.go:300 msg="Enabling flash attention"
time=2024-11-11T19:49:24.421Z level=INFO source=server.go:467 msg="starting llama server" cmd="C:\\Users\\VMZ\\AppData\\Local\\Programs\\Ollama\\lib\\ollama\\runners\\cuda_v12\\ollama_llama_server.exe --model C:\\Users\\VMZ\\.ollama\\models\\blobs\\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 --ctx-size 16384 --batch-size 512 --embedding --n-gpu-layers 27 --threads 16 --flash-attn --cache-type-k q8_0 --cache-type-v q8_0 --no-mmap --parallel 4 --port 53219"
time=2024-11-11T19:49:24.424Z level=INFO source=sched.go:449 msg="loaded runners" count=1
time=2024-11-11T19:49:24.424Z level=INFO source=server.go:646 msg="waiting for llama runner to start responding"
time=2024-11-11T19:49:24.425Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server error"
time=2024-11-11T19:49:24.657Z level=INFO source=runner.go:845 msg="starting go runner"
time=2024-11-11T19:49:24.658Z level=INFO source=runner.go:846 msg=system info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | cgo(gcc)" threads=16
time=2024-11-11T19:49:24.659Z level=INFO source=.:0 msg="Server listening on 127.0.0.1:53219"
time=2024-11-11T19:49:24.677Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server loading model"
llama_model_loader: loaded meta data with 40 key-value pairs and 288 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 2b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 2B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 2304
llama_model_loader: - kv 11: gemma2.block_count u32 = 26
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 9216
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 8
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 4
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% if messages[0]['rol...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-2b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 182
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 105 tensors
llama_model_loader: - type q8_0: 183 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 249
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 2304
llm_load_print_meta: n_layer = 26
llm_load_print_meta: n_head = 8
llm_load_print_meta: n_head_kv = 4
llm_load_print_meta: n_rot = 256
llm_load_print_meta: n_swa = 4096
llm_load_print_meta: n_embd_head_k = 256
llm_load_print_meta: n_embd_head_v = 256
llm_load_print_meta: n_gqa = 2
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 9216
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 2B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 2.61 B
llm_load_print_meta: model size = 2.59 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 2b It
llm_load_print_meta: BOS token = 2 '<bos>'
llm_load_print_meta: EOS token = 1 '<eos>'
llm_load_print_meta: UNK token = 3 '<unk>'
llm_load_print_meta: PAD token = 0 '<pad>'
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 '<end_of_turn>'
llm_load_print_meta: EOG token = 1 '<eos>'
llm_load_print_meta: EOG token = 107 '<end_of_turn>'
llm_load_print_meta: max token length = 48
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: GRID P40-8A, compute capability 6.1, VMM: no
llm_load_tensors: ggml ctx size = 0.26 MiB
llm_load_tensors: offloading 26 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 27/27 layers to GPU
llm_load_tensors: CUDA_Host buffer size = 597.66 MiB
llm_load_tensors: CUDA0 buffer size = 2649.78 MiB
llama_new_context_with_model: n_ctx = 16384
llama_new_context_with_model: n_batch = 2048
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 1
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA0 KV buffer size = 884.00 MiB
llama_new_context_with_model: KV self size = 884.00 MiB, K (q8_0): 442.00 MiB, V (q8_0): 442.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 3.94 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 504.50 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 97.01 MiB
llama_new_context_with_model: graph nodes = 870
llama_new_context_with_model: graph splits = 54
time=2024-11-11T19:49:27.688Z level=INFO source=server.go:685 msg="llama runner started in 3.26 seconds"
llama_model_loader: loaded meta data with 40 key-value pairs and 288 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 2b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 2B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 2304
llama_model_loader: - kv 11: gemma2.block_count u32 = 26
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 9216
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 8
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 4
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% if messages[0]['rol...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-2b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 182
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 105 tensors
llama_model_loader: - type q8_0: 183 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 249
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 1
llm_load_print_meta: model type = ?B
llm_load_print_meta: model ftype = all F32
llm_load_print_meta: model params = 2.61 B
llm_load_print_meta: model size = 2.59 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 2b It
llm_load_print_meta: BOS token = 2 '<bos>'
llm_load_print_meta: EOS token = 1 '<eos>'
llm_load_print_meta: UNK token = 3 '<unk>'
llm_load_print_meta: PAD token = 0 '<pad>'
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 '<end_of_turn>'
llm_load_print_meta: EOG token = 1 '<eos>'
llm_load_print_meta: EOG token = 107 '<end_of_turn>'
llm_load_print_meta: max token length = 48
llama_model_load: vocab only - skipping tensors
[GIN] 2024/11/11 - 19:50:00 | 200 | 39.7309256s | 10.0.0.220 | POST "/api/chat"
time=2024-11-11T19:50:00.128Z level=WARN source=types.go:509 msg="invalid option provided" option=stream_response
time=2024-11-11T19:50:00.396Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=GPU-c707ca87-9ffc-11ef-acd4-9c4a84a45058 library=cuda total="8.0 GiB" available="2.5 GiB"
time=2024-11-11T19:50:00.396Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=0 library=oneapi total="3.9 GiB" available="3.7 GiB"
time=2024-11-11T19:50:01.373Z level=INFO source=server.go:106 msg="system memory" total="54.0 GiB" free="47.8 GiB" free_swap="63.4 GiB"
time=2024-11-11T19:50:01.373Z level=INFO source=memory.go:354 msg="offload to cuda" layers.requested=33 layers.model=43 layers.offload=20 layers.split="" memory.available="[6.6 GiB]" memory.gpu_overhead="0 B" memory.required.full="12.3 GiB" memory.required.partial="6.4 GiB" memory.required.kv="1.3 GiB" memory.required.allocations="[6.4 GiB]" memory.weights.total="9.6 GiB" memory.weights.repeating="8.6 GiB" memory.weights.nonrepeating="929.7 MiB" memory.graph.full="507.0 MiB" memory.graph.partial="1.2 GiB"
time=2024-11-11T19:50:01.375Z level=INFO source=server.go:300 msg="Enabling flash attention"
time=2024-11-11T19:50:01.378Z level=INFO source=server.go:467 msg="starting llama server" cmd="C:\\Users\\VMZ\\AppData\\Local\\Programs\\Ollama\\lib\\ollama\\runners\\cuda_v12\\ollama_llama_server.exe --model C:\\Users\\VMZ\\.ollama\\models\\blobs\\sha256-820716d00fbd469307ccc1ca9f98eead1dba6f42a38f538e362e27190ffd14af --ctx-size 8192 --batch-size 512 --embedding --n-gpu-layers 33 --threads 16 --flash-attn --cache-type-k q8_0 --cache-type-v q8_0 --no-mmap --parallel 1 --port 53256"
time=2024-11-11T19:50:01.381Z level=INFO source=sched.go:449 msg="loaded runners" count=1
time=2024-11-11T19:50:01.381Z level=INFO source=server.go:646 msg="waiting for llama runner to start responding"
time=2024-11-11T19:50:01.382Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server error"
time=2024-11-11T19:50:01.530Z level=INFO source=runner.go:845 msg="starting go runner"
time=2024-11-11T19:50:01.530Z level=INFO source=runner.go:846 msg=system info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | cgo(gcc)" threads=16
time=2024-11-11T19:50:01.531Z level=INFO source=.:0 msg="Server listening on 127.0.0.1:53256"
llama_model_loader: loaded meta data with 40 key-value pairs and 464 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-820716d00fbd469307ccc1ca9f98eead1dba6f42a38f538e362e27190ffd14af (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 9b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 9B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 3584
llama_model_loader: - kv 11: gemma2.block_count u32 = 42
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 14336
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 16
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 8
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
time=2024-11-11T19:50:01.634Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server loading model"
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% for message in mess...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-9b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 294
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 169 tensors
llama_model_loader: - type q8_0: 295 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 217
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 3584
llm_load_print_meta: n_layer = 42
llm_load_print_meta: n_head = 16
llm_load_print_meta: n_head_kv = 8
llm_load_print_meta: n_rot = 256
llm_load_print_meta: n_swa = 4096
llm_load_print_meta: n_embd_head_k = 256
llm_load_print_meta: n_embd_head_v = 256
llm_load_print_meta: n_gqa = 2
llm_load_print_meta: n_embd_k_gqa = 2048
llm_load_print_meta: n_embd_v_gqa = 2048
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 14336
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 9B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 9.24 B
llm_load_print_meta: model size = 9.15 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 9b It
llm_load_print_meta: BOS token = 2 '<bos>'
llm_load_print_meta: EOS token = 1 '<eos>'
llm_load_print_meta: UNK token = 3 '<unk>'
llm_load_print_meta: PAD token = 0 '<pad>'
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 '<end_of_turn>'
llm_load_print_meta: EOG token = 1 '<eos>'
llm_load_print_meta: EOG token = 107 '<end_of_turn>'
llm_load_print_meta: max token length = 48
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: GRID P40-8A, compute capability 6.1, VMM: no
llm_load_tensors: ggml ctx size = 0.41 MiB
llm_load_tensors: offloading 33 repeating layers to GPU
llm_load_tensors: offloaded 33/43 layers to GPU
llm_load_tensors: CUDA_Host buffer size = 3667.19 MiB
llm_load_tensors: CUDA0 buffer size = 6628.62 MiB
llama_new_context_with_model: n_ctx = 8192
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 1
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA_Host KV buffer size = 306.00 MiB
llama_kv_cache_init: CUDA0 KV buffer size = 1122.00 MiB
llama_new_context_with_model: KV self size = 1428.00 MiB, K (q8_0): 714.00 MiB, V (q8_0): 714.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.99 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 1436.69 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 66.01 MiB
llama_new_context_with_model: graph nodes = 1398
llama_new_context_with_model: graph splits = 182
time=2024-11-11T19:50:08.406Z level=INFO source=server.go:685 msg="llama runner started in 7.02 seconds"
llama_model_loader: loaded meta data with 40 key-value pairs and 464 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-820716d00fbd469307ccc1ca9f98eead1dba6f42a38f538e362e27190ffd14af (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 9b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 9B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 3584
llama_model_loader: - kv 11: gemma2.block_count u32 = 42
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 14336
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 16
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 8
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% for message in mess...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-9b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 294
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 169 tensors
llama_model_loader: - type q8_0: 295 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 217
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 1
llm_load_print_meta: model type = ?B
llm_load_print_meta: model ftype = all F32
llm_load_print_meta: model params = 9.24 B
llm_load_print_meta: model size = 9.15 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 9b It
llm_load_print_meta: BOS token = 2 '<bos>'
llm_load_print_meta: EOS token = 1 '<eos>'
llm_load_print_meta: UNK token = 3 '<unk>'
llm_load_print_meta: PAD token = 0 '<pad>'
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 '<end_of_turn>'
llm_load_print_meta: EOG token = 1 '<eos>'
llm_load_print_meta: EOG token = 107 '<end_of_turn>'
llm_load_print_meta: max token length = 48
llama_model_load: vocab only - skipping tensors
[GIN] 2024/11/11 - 19:51:23 | 200 | 1m23s | 10.0.0.220 | POST "/api/chat"
time=2024-11-11T19:51:24.695Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=GPU-c707ca87-9ffc-11ef-acd4-9c4a84a45058 library=cuda total="8.0 GiB" available="1.6 GiB"
time=2024-11-11T19:51:24.695Z level=INFO source=sched.go:507 msg="updated VRAM based on existing loaded models" gpu=0 library=oneapi total="3.9 GiB" available="3.7 GiB"
time=2024-11-11T19:51:28.585Z level=INFO source=sched.go:730 msg="new model will fit in available VRAM, loading" model=C:\Users\VMZ\.ollama\models\blobs\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 library=cuda parallel=4 required="4.4 GiB"
time=2024-11-11T19:51:28.668Z level=INFO source=server.go:106 msg="system memory" total="54.0 GiB" free="47.8 GiB" free_swap="63.3 GiB"
time=2024-11-11T19:51:28.669Z level=INFO source=memory.go:354 msg="offload to cuda" layers.requested=-1 layers.model=27 layers.offload=27 layers.split="" memory.available="[6.5 GiB]" memory.gpu_overhead="0 B" memory.required.full="4.4 GiB" memory.required.partial="4.4 GiB" memory.required.kv="832.0 MiB" memory.required.allocations="[4.4 GiB]" memory.weights.total="2.8 GiB" memory.weights.repeating="2.2 GiB" memory.weights.nonrepeating="597.7 MiB" memory.graph.full="504.5 MiB" memory.graph.partial="965.9 MiB"
time=2024-11-11T19:51:28.671Z level=INFO source=server.go:300 msg="Enabling flash attention"
time=2024-11-11T19:51:28.674Z level=INFO source=server.go:467 msg="starting llama server" cmd="C:\\Users\\VMZ\\AppData\\Local\\Programs\\Ollama\\lib\\ollama\\runners\\cuda_v12\\ollama_llama_server.exe --model C:\\Users\\VMZ\\.ollama\\models\\blobs\\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 --ctx-size 16384 --batch-size 512 --embedding --n-gpu-layers 27 --threads 16 --flash-attn --cache-type-k q8_0 --cache-type-v q8_0 --no-mmap --parallel 4 --port 53293"
time=2024-11-11T19:51:28.676Z level=INFO source=sched.go:449 msg="loaded runners" count=1
time=2024-11-11T19:51:28.676Z level=INFO source=server.go:646 msg="waiting for llama runner to start responding"
time=2024-11-11T19:51:28.677Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server error"
time=2024-11-11T19:51:28.837Z level=INFO source=runner.go:845 msg="starting go runner"
time=2024-11-11T19:51:28.837Z level=INFO source=runner.go:846 msg=system info="AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 0 | FP16_VA = 0 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | cgo(gcc)" threads=16
time=2024-11-11T19:51:28.838Z level=INFO source=.:0 msg="Server listening on 127.0.0.1:53293"
llama_model_loader: loaded meta data with 40 key-value pairs and 288 tensors from C:\Users\VMZ\.ollama\models\blobs\sha256-e2c23eddd5f577b82ba3714b19c4350edbf1f4edfb7c5a4bc941ebc608b43bc2 (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 2b It
llama_model_loader: - kv 3: general.organization str = Google
llama_model_loader: - kv 4: general.finetune str = it
llama_model_loader: - kv 5: general.basename str = gemma-2
llama_model_loader: - kv 6: general.size_label str = 2B
llama_model_loader: - kv 7: general.license str = gemma
llama_model_loader: - kv 8: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 9: gemma2.context_length u32 = 8192
llama_model_loader: - kv 10: gemma2.embedding_length u32 = 2304
llama_model_loader: - kv 11: gemma2.block_count u32 = 26
llama_model_loader: - kv 12: gemma2.feed_forward_length u32 = 9216
llama_model_loader: - kv 13: gemma2.attention.head_count u32 = 8
llama_model_loader: - kv 14: gemma2.attention.head_count_kv u32 = 4
llama_model_loader: - kv 15: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 16: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 17: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 18: general.file_type u32 = 7
llama_model_loader: - kv 19: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 20: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 21: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 22: tokenizer.ggml.model str = llama
llama_model_loader: - kv 23: tokenizer.ggml.pre str = default
time=2024-11-11T19:51:28.929Z level=INFO source=server.go:680 msg="waiting for server to become available" status="llm server loading model"
llama_model_loader: - kv 24: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
llama_model_loader: - kv 25: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 27: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 29: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 32: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {{ bos_token }}{% if messages[0]['rol...
llama_model_loader: - kv 34: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 35: general.quantization_version u32 = 2
llama_model_loader: - kv 36: quantize.imatrix.file str = /models_out/gemma-2-2b-it-abliterated...
llama_model_loader: - kv 37: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 38: quantize.imatrix.entries_count i32 = 182
llama_model_loader: - kv 39: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 105 tensors
llama_model_loader: - type q8_0: 183 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 249
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 2304
llm_load_print_meta: n_layer = 26
llm_load_print_meta: n_head = 8
llm_load_print_meta: n_head_kv = 4
llm_load_print_meta: n_rot = 256
llm_load_print_meta: n_swa = 4096
llm_load_print_meta: n_embd_head_k = 256
llm_load_print_meta: n_embd_head_v = 256
llm_load_print_meta: n_gqa = 2
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 9216
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 2B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 2.61 B
llm_load_print_meta: model size = 2.59 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 2b It
llm_load_print_meta: BOS token = 2 '<bos>'
llm_load_print_meta: EOS token = 1 '<eos>'
llm_load_print_meta: UNK token = 3 '<unk>'
llm_load_print_meta: PAD token = 0 '<pad>'
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 '<end_of_turn>'
llm_load_print_meta: EOG token = 1 '<eos>'
llm_load_print_meta: EOG token = 107 '<end_of_turn>'
llm_load_print_meta: max token length = 48
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: GRID P40-8A, compute capability 6.1, VMM: no
llm_load_tensors: ggml ctx size = 0.26 MiB
llm_load_tensors: offloading 26 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 27/27 layers to GPU
llm_load_tensors: CUDA_Host buffer size = 597.66 MiB
llm_load_tensors: CUDA0 buffer size = 2649.78 MiB
llama_new_context_with_model: n_ctx = 16384
llama_new_context_with_model: n_batch = 2048
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 1
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA0 KV buffer size = 884.00 MiB
llama_new_context_with_model: KV self size = 884.00 MiB, K (q8_0): 442.00 MiB, V (q8_0): 442.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 3.94 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 504.50 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 97.01 MiB
llama_new_context_with_model: graph nodes = 870
llama_new_context_with_model: graph splits = 54
time=2024-11-11T19:51:31.688Z level=INFO source=server.go:685 msg="llama runner started in 3.01 seconds"
[GIN] 2024/11/11 - 19:51:33 | 200 | 9.6983072s | 10.0.0.220 | POST "/api/chat"
[GIN] 2024/11/11 - 19:51:38 | 200 | 5.1640561s | 10.0.0.220 | POST "/api/chat"
Laptop:
Hardware:
OS: Windows 10 22H2
CPU: i7-8750H
RAM: 32GB DDR4 2133MHz
Main drive: WD Black 1TB SN850X NVMe
2nd drive: Micron 256GB NVMe
3rd drive: Seagate BarraCuda 2TB
Software: fresh install of the latest Ollama
Docker running open-webui
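For anyone reproducing this setup, the open-webui side is typically started along these lines; this is a generic sketch, not the reporter's exact command, and the port mapping, volume name, and OLLAMA_BASE_URL value are assumptions:

```powershell
# Generic open-webui container pointed at an Ollama server on the host.
# host.docker.internal and the 3000:8080 mapping are assumed defaults;
# adjust OLLAMA_BASE_URL to wherever `ollama serve` is listening.
docker run -d -p 3000:8080 `
  -e OLLAMA_BASE_URL=http://host.docker.internal:11434 `
  -v open-webui:/app/backend/data `
  --name open-webui ghcr.io/open-webui/open-webui:main
```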
@jessegross commented on GitHub (Nov 12, 2024):
Does it happen if you disable flash attention? Do you know if this is new behavior?
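For reference, Ollama toggles flash attention via the OLLAMA_FLASH_ATTENTION environment variable (the "Enabling flash attention" lines in the logs show it is on here). A minimal sketch of a test run with it disabled, assuming the server is started from PowerShell rather than the tray app:

```powershell
# Stop the running Ollama instance first, then launch a test server with
# flash attention off for this session only.
$env:OLLAMA_FLASH_ATTENTION = "0"
# The --cache-type-k/-v q8_0 flags in the logs suggest OLLAMA_KV_CACHE_TYPE
# is also set; quantized KV cache depends on flash attention, so clear it.
Remove-Item Env:OLLAMA_KV_CACHE_TYPE -ErrorAction SilentlyContinue
ollama serve
```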