[GH-ISSUE #4297] Please update Go module github.com/chewxy/math32 to the latest version #64720

Closed
opened 2026-05-03 18:36:18 -05:00 by GiteaMirror · 4 comments
Owner

Originally created by @HougeLangley on GitHub (May 9, 2024).
Original GitHub issue: https://github.com/ollama/ollama/issues/4297

What is the issue?

https://github.com/chewxy/math32/issues/46#issuecomment-2103347015

 sipeed @ lpi4a in ~/ollama on git:main o [3:10:57] 
$ go generate ./...        
go: downloading go1.22.0 (linux/riscv64)
go: downloading github.com/google/uuid v1.0.0
go: downloading golang.org/x/crypto v0.14.0
go: downloading google.golang.org/protobuf v1.30.0
go: downloading github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1
go: downloading github.com/mitchellh/mapstructure v1.5.0
go: downloading github.com/nlpodyssey/gopickle v0.3.0
go: downloading github.com/pdevine/tensor v0.0.0-20240228013915-64ccaa8d9ca9
go: downloading github.com/x448/float16 v0.8.4
go: downloading golang.org/x/sys v0.13.0
go: downloading golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
go: downloading golang.org/x/sync v0.3.0
go: downloading github.com/gin-gonic/gin v1.9.1
go: downloading golang.org/x/term v0.13.0
go: downloading github.com/emirpasic/gods v1.18.1
go: downloading github.com/gin-contrib/cors v1.4.0
go: downloading github.com/containerd/console v1.0.3
go: downloading github.com/olekukonko/tablewriter v0.0.5
go: downloading github.com/spf13/cobra v1.7.0
go: downloading golang.org/x/text v0.14.0
go: downloading github.com/apache/arrow/go/arrow v0.0.0-20201229220542-30ce2eb5d4dc
go: downloading github.com/chewxy/hm v1.0.0
go: downloading github.com/chewxy/math32 v1.0.8
go: downloading github.com/google/flatbuffers v1.12.0
go: downloading github.com/pkg/errors v0.9.1
go: downloading go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6
go: downloading gonum.org/v1/gonum v0.8.2
go: downloading gorgonia.org/vecf32 v0.9.0
go: downloading gorgonia.org/vecf64 v0.9.0
go: downloading github.com/gin-contrib/sse v0.1.0
go: downloading github.com/mattn/go-isatty v0.0.19
go: downloading golang.org/x/net v0.17.0
go: downloading github.com/mattn/go-runewidth v0.0.14
go: downloading github.com/spf13/pflag v1.0.5
go: downloading golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
go: downloading github.com/xtgo/set v1.0.0
go: downloading github.com/gogo/protobuf v1.3.2
go: downloading github.com/golang/protobuf v1.5.0
go: downloading github.com/go-playground/validator/v10 v10.14.0
go: downloading github.com/pelletier/go-toml/v2 v2.0.8
go: downloading github.com/ugorji/go/codec v1.2.11
go: downloading gopkg.in/yaml.v3 v3.0.1
go: downloading github.com/rivo/uniseg v0.2.0
go: downloading github.com/gabriel-vasile/mimetype v1.4.2
go: downloading github.com/go-playground/universal-translator v0.18.1
go: downloading github.com/leodido/go-urn v1.2.4
go: downloading github.com/go-playground/locales v0.14.1
+ set -o pipefail
+ echo 'Starting linux generate script'
Starting linux generate script
+ '[' -z '' ']'
+ '[' -x /usr/local/cuda/bin/nvcc ']'
++ command -v nvcc
+ export CUDACXX=
+ CUDACXX=
+ COMMON_CMAKE_DEFS='-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off'
++ dirname ./gen_linux.sh
+ source ./gen_common.sh
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=riscv64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '' ']'
+ CMAKE_CUDA_ARCHITECTURES='50;52;61;70;75;80'
+ git_module_setup
+ '[' -n '' ']'
+ '[' -d ../llama.cpp/gguf ']'
+ git submodule init
+ git submodule update --force ../llama.cpp
Submodule path '../llama.cpp': checked out '952d03dbead16e4dbdd1d3458486340673cc2465'
+ apply_patches
+ grep ollama ../llama.cpp/CMakeLists.txt
+ echo 'add_subdirectory(../ext_server ext_server) # ollama'
++ ls -A ../patches/02-clip-log.diff ../patches/03-load_exception.diff ../patches/04-metal.diff ../patches/05-clip-fix.diff
+ '[' -n '../patches/02-clip-log.diff
../patches/03-load_exception.diff
../patches/04-metal.diff
../patches/05-clip-fix.diff' ']'
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/02-clip-log.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout examples/llava/clip.cpp
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/03-load_exception.diff
++ cut -f2- -d/
++ cut -f2 '-d '
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout llama.cpp
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/04-metal.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout ggml-metal.m
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/05-clip-fix.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout examples/llava/clip.cpp
Updated 0 paths from the index
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/02-clip-log.diff
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/03-load_exception.diff
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/04-metal.diff
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/05-clip-fix.diff
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=riscv64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '50;52;61;70;75;80' ']'
+ '[' -z '' -o '' = static ']'
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=riscv64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '50;52;61;70;75;80' ']'
+ CMAKE_TARGETS='--target llama --target ggml'
+ CMAKE_DEFS='-DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ BUILD_DIR=../build/linux/riscv64_static
+ echo 'Building static library'
Building static library
+ build
+ cmake -S ../llama.cpp -B ../build/linux/riscv64_static -DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off
-- The C compiler identification is GNU 13.2.0
-- The CXX compiler identification is GNU 13.2.0
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Check for working C compiler: /usr/bin/cc - skipped
-- Detecting C compile features
-- Detecting C compile features - done
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: /usr/bin/c++ - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Found Git: /usr/bin/git (found version "2.40.1") 
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Success
-- Found Threads: TRUE  
-- Warning: ccache not found - consider installing it for faster compilation or disable this warning with LLAMA_CCACHE=OFF
-- CMAKE_SYSTEM_PROCESSOR: riscv64
-- Unknown architecture
-- Configuring done
-- Generating done
-- Build files have been written to: /home/sipeed/ollama/llm/build/linux/riscv64_static
+ cmake --build ../build/linux/riscv64_static --target llama --target ggml -j8
[ 33%] Building C object CMakeFiles/ggml.dir/ggml-alloc.c.o
[ 33%] Building C object CMakeFiles/ggml.dir/ggml-backend.c.o
[ 50%] Building CXX object CMakeFiles/ggml.dir/sgemm.cpp.o
[ 50%] Building C object CMakeFiles/ggml.dir/ggml.c.o
[ 50%] Building C object CMakeFiles/ggml.dir/ggml-quants.c.o
[ 50%] Built target ggml
[ 83%] Building CXX object CMakeFiles/llama.dir/llama.cpp.o
[ 83%] Building CXX object CMakeFiles/llama.dir/unicode.cpp.o
[ 83%] Building CXX object CMakeFiles/llama.dir/unicode-data.cpp.o
[100%] Linking CXX static library libllama.a
[100%] Built target llama
[100%] Built target ggml
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=riscv64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '50;52;61;70;75;80' ']'
+ '[' -z '' ']'
+ '[' -n '' ']'
+ COMMON_CPU_DEFS='-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off'
+ '[' -z '' -o '' = cpu ']'
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=riscv64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '50;52;61;70;75;80' ']'
+ CMAKE_DEFS='-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ BUILD_DIR=../build/linux/riscv64/cpu
+ echo 'Building LCD CPU'
Building LCD CPU
+ build
+ cmake -S ../llama.cpp -B ../build/linux/riscv64/cpu -DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off
-- The C compiler identification is GNU 13.2.0
-- The CXX compiler identification is GNU 13.2.0
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Check for working C compiler: /usr/bin/cc - skipped
-- Detecting C compile features
-- Detecting C compile features - done
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: /usr/bin/c++ - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Found Git: /usr/bin/git (found version "2.40.1") 
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Success
-- Found Threads: TRUE  
-- Warning: ccache not found - consider installing it for faster compilation or disable this warning with LLAMA_CCACHE=OFF
-- CMAKE_SYSTEM_PROCESSOR: riscv64
-- Unknown architecture
-- Configuring done
-- Generating done
-- Build files have been written to: /home/sipeed/ollama/llm/build/linux/riscv64/cpu
+ cmake --build ../build/linux/riscv64/cpu --target ollama_llama_server -j8
[  6%] Generating build details from Git
[  6%] Building C object CMakeFiles/ggml.dir/ggml.c.o
[ 12%] Building C object CMakeFiles/ggml.dir/ggml-alloc.c.o
[ 18%] Building C object CMakeFiles/ggml.dir/ggml-backend.c.o
[ 18%] Building C object CMakeFiles/ggml.dir/ggml-quants.c.o
-- Found Git: /usr/bin/git (found version "2.40.1") 
[ 25%] Building CXX object CMakeFiles/ggml.dir/sgemm.cpp.o
[ 31%] Building CXX object common/CMakeFiles/build_info.dir/build-info.cpp.o
[ 31%] Built target build_info
[ 31%] Built target ggml
[ 37%] Building CXX object CMakeFiles/llama.dir/llama.cpp.o
[ 43%] Building CXX object CMakeFiles/llama.dir/unicode.cpp.o
[ 43%] Building CXX object CMakeFiles/llama.dir/unicode-data.cpp.o
[ 50%] Linking CXX static library libllama.a
[ 50%] Built target llama
[ 56%] Building CXX object examples/llava/CMakeFiles/llava.dir/clip.cpp.o
[ 62%] Building CXX object examples/llava/CMakeFiles/llava.dir/llava.cpp.o
[ 56%] Building CXX object common/CMakeFiles/common.dir/common.cpp.o
[ 68%] Building CXX object common/CMakeFiles/common.dir/console.cpp.o
[ 75%] Building CXX object common/CMakeFiles/common.dir/sampling.cpp.o
[ 81%] Building CXX object common/CMakeFiles/common.dir/json-schema-to-grammar.cpp.o
[ 81%] Building CXX object common/CMakeFiles/common.dir/grammar-parser.cpp.o
[ 87%] Building CXX object common/CMakeFiles/common.dir/train.cpp.o
[ 87%] Building CXX object common/CMakeFiles/common.dir/ngram-cache.cpp.o
[ 93%] Linking CXX static library libcommon.a
[ 93%] Built target llava
[ 93%] Built target common
[ 93%] Building CXX object ext_server/CMakeFiles/ollama_llama_server.dir/server.cpp.o
[100%] Linking CXX executable ../bin/ollama_llama_server
[100%] Built target ollama_llama_server
+ compress
+ echo 'Compressing payloads to reduce overall binary size...'
Compressing payloads to reduce overall binary size...
+ pids=
+ rm -rf '../build/linux/riscv64/cpu/bin/*.gz'
+ for f in ${BUILD_DIR}/bin/*
+ pids+=' 2078'
+ '[' -d ../build/linux/riscv64/cpu/lib ']'
+ gzip -n --best -f ../build/linux/riscv64/cpu/bin/ollama_llama_server
+ echo

+ for pid in ${pids}
+ wait 2078
+ echo 'Finished compression'
Finished compression
+ '[' riscv64 == x86_64 ']'
+ '[' -z '' ']'
+ '[' -d /usr/local/cuda/lib64 ']'
+ '[' -z '' ']'
+ '[' -d /opt/cuda/targets/x86_64-linux/lib ']'
+ '[' -z '' ']'
+ CUDART_LIB_DIR=
+ '[' -d '' ']'
+ '[' -z '' ']'
+ ROCM_PATH=/opt/rocm
+ '[' -z '' ']'
+ '[' -d /usr/lib/cmake/CLBlast ']'
+ '[' -d /opt/rocm ']'
+ cleanup
+ cd ../llama.cpp/
+ git checkout CMakeLists.txt
Updated 1 path from the index
++ ls -A ../patches/02-clip-log.diff ../patches/03-load_exception.diff ../patches/04-metal.diff ../patches/05-clip-fix.diff
+ '[' -n '../patches/02-clip-log.diff
../patches/03-load_exception.diff
../patches/04-metal.diff
../patches/05-clip-fix.diff' ']'
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/02-clip-log.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout examples/llava/clip.cpp
Updated 1 path from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/03-load_exception.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout llama.cpp
Updated 1 path from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/04-metal.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout ggml-metal.m
Updated 1 path from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/05-clip-fix.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout examples/llava/clip.cpp
Updated 0 paths from the index
++ cd ../build/linux/riscv64/cpu/..
++ echo cpu
+ echo 'go generate completed.  LLM runners: cpu'
go generate completed.  LLM runners: cpu

# sipeed @ lpi4a in ~/ollama on git:main o [3:27:53] 
$ go build .
# github.com/chewxy/math32
../go/pkg/mod/github.com/chewxy/math32@v1.0.8/exp.go:3:6: missing function body
../go/pkg/mod/github.com/chewxy/math32@v1.0.8/exp.go:57:6: missing function body
../go/pkg/mod/github.com/chewxy/math32@v1.0.8/sqrt.go:3:6: missing function body
../go/pkg/mod/github.com/chewxy/math32@v1.0.8/log.go:76:6: missing function body
../go/pkg/mod/github.com/chewxy/math32@v1.0.8/remainder.go:33:6: missing function body
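
The "missing function body" errors occur because math32 at this version declares functions such as Exp, Sqrt, Log and Remainder in Go but implements them only in per-architecture assembly, and no pure-Go fallback is selected for riscv64. Below is a minimal sketch of the kind of fallback a riscv64 patch has to provide; the file name, build tag and delegation to the standard math package are my assumptions, not the upstream fix (the bodiless declarations would also need matching build tags):

```go
//go:build riscv64

// fallback_riscv64.go (hypothetical): supply Go bodies for functions that are
// otherwise implemented in architecture-specific assembly.
package math32

import "math"

// Exp returns e**x, computed via the float64 math package and narrowed to float32.
func Exp(x float32) float32 { return float32(math.Exp(float64(x))) }

// Sqrt returns the square root of x.
func Sqrt(x float32) float32 { return float32(math.Sqrt(float64(x))) }

// Log returns the natural logarithm of x.
func Log(x float32) float32 { return float32(math.Log(float64(x))) }

// Remainder returns the IEEE 754 floating-point remainder of x/y.
func Remainder(x, y float32) float32 { return float32(math.Remainder(float64(x), float64(y))) }
```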

OS

Linux

GPU

Other

CPU

Other

Ollama version

0.1.34

GiteaMirror added the bug label 2026-05-03 18:36:18 -05:00
Author
Owner

@HougeLangley commented on GitHub (May 10, 2024):

For now, I am using this patch: https://github.com/felixonmars/archriscv-packages/blob/master/ollama/riscv64.patch

With it applied, `go build .` hits another bug:

# sipeed @ lpi4a in ~/ollama on git:main x [0:23:31] 
$ go build .                                                                                                            
# github.com/ollama/ollama
/home/sipeed/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.22.0.linux-riscv64/pkg/tool/linux_riscv64/link: running gcc failed: exit status 1
/usr/bin/ld: /tmp/go-link-2991158597/000019.o: in function `_cgo_d85a20fb2d9c_Cfunc_llama_model_quantize':
/tmp/go-build/llm.cgo2.c:69: undefined reference to `llama_model_quantize'
/usr/bin/ld: /tmp/go-link-2991158597/000019.o: in function `_cgo_d85a20fb2d9c_Cfunc_llama_model_quantize_default_params':
/tmp/go-build/llm.cgo2.c:86: undefined reference to `llama_model_quantize_default_params'
/usr/bin/ld: /tmp/go-link-2991158597/000019.o: in function `_cgo_d85a20fb2d9c_Cfunc_llama_print_system_info':
/tmp/go-build/llm.cgo2.c:103: undefined reference to `llama_print_system_info'
collect2: error: ld returned 1 exit status
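
The undefined references are a cgo link-time problem rather than a Go compile error: the Go side only declares the llama.cpp entry points, and their definitions must come from the static archives produced by `go generate` (e.g. libllama.a). If those archives are missing from the link line or were built without the expected symbols, the link fails exactly like this. A standalone sketch of the mechanism, with illustrative paths and flags (not Ollama's actual llm package):

```go
// Hypothetical example of how an "undefined reference" to a llama.cpp symbol
// appears at link time. The library search path below is illustrative.
package main

/*
#cgo LDFLAGS: -L./build/linux/riscv64_static -lllama -lstdc++ -lm
// Declaration only: the definition has to be supplied by libllama.a when linking.
const char * llama_print_system_info(void);
*/
import "C"

import "fmt"

func main() {
	// Fails with "undefined reference to `llama_print_system_info'" if the
	// archive on the -L path is absent or lacks the symbol.
	fmt.Println(C.GoString(C.llama_print_system_info()))
}
```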
Author
Owner

@HougeLangley commented on GitHub (May 11, 2024):

Bugs are still present.

Author
Owner

@HougeLangley commented on GitHub (May 11, 2024):

# sipeed @ lpi4a in ~ [0:48:31] 
$ git clone https://github.com/ollama/ollama.git
Cloning into 'ollama'...
remote: Enumerating objects: 15370, done.
remote: Counting objects: 100% (2889/2889), done.
remote: Compressing objects: 100% (564/564), done.
remote: Total 15370 (delta 2609), reused 2396 (delta 2325), pack-reused 12481
Receiving objects: 100% (15370/15370), 10.97 MiB | 4.25 MiB/s, done.
Resolving deltas: 100% (9701/9701), done.

# sipeed @ lpi4a in ~ [0:48:42] 
$ cd ollama 

# sipeed @ lpi4a in ~/ollama on git:main o [0:48:46] 
$ git submodule init                            
Submodule 'llama.cpp' (https://github.com/ggerganov/llama.cpp.git) registered for path 'llm/llama.cpp'

# sipeed @ lpi4a in ~/ollama on git:main o [0:48:51] 
$ git submodule update
Cloning into '/home/sipeed/ollama/llm/llama.cpp'...
remote: Enumerating objects: 15604, done.
remote: Counting objects: 100% (15604/15604), done.
remote: Compressing objects: 100% (4240/4240), done.
remote: Total 15194 (delta 11352), reused 14648 (delta 10828), pack-reused 0
Receiving objects: 100% (15194/15194), 14.24 MiB | 4.51 MiB/s, done.
Resolving deltas: 100% (11352/11352), completed with 336 local objects.
From https://github.com/ggerganov/llama.cpp
 * branch            952d03dbead16e4dbdd1d3458486340673cc2465 -> FETCH_HEAD
Submodule path 'llm/llama.cpp': checked out '952d03dbead16e4dbdd1d3458486340673cc2465'

# sipeed @ lpi4a in ~/ollama on git:main o [0:49:27] 
$ go generate ./...                                                          
go: downloading github.com/google/uuid v1.1.2
go: downloading golang.org/x/crypto v0.23.0
go: downloading golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa
go: downloading golang.org/x/term v0.20.0
go: downloading github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c
go: downloading google.golang.org/protobuf v1.34.1
go: downloading golang.org/x/sys v0.20.0
go: downloading github.com/gin-gonic/gin v1.10.0
go: downloading github.com/gin-contrib/cors v1.7.2
go: downloading golang.org/x/text v0.15.0
go: downloading github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40
go: downloading github.com/google/flatbuffers v24.3.25+incompatible
go: downloading gonum.org/v1/gonum v0.15.0
go: downloading github.com/mattn/go-isatty v0.0.20
go: downloading golang.org/x/net v0.25.0
go: downloading github.com/golang/protobuf v1.5.4
go: downloading github.com/go-playground/validator/v10 v10.20.0
go: downloading github.com/pelletier/go-toml/v2 v2.2.2
go: downloading github.com/ugorji/go/codec v1.2.12
go: downloading github.com/leodido/go-urn v1.4.0
go: downloading github.com/gabriel-vasile/mimetype v1.4.3
+ set -o pipefail
+ echo 'Starting linux generate script'
Starting linux generate script
+ '[' -z '' ']'
+ '[' -x /usr/local/cuda/bin/nvcc ']'
++ command -v nvcc
+ export CUDACXX=
+ CUDACXX=
+ COMMON_CMAKE_DEFS='-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off'
++ dirname ./gen_linux.sh
+ source ./gen_common.sh
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=riscv64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '' ']'
+ CMAKE_CUDA_ARCHITECTURES='50;52;61;70;75;80'
+ git_module_setup
+ '[' -n '' ']'
+ '[' -d ../llama.cpp/gguf ']'
+ git submodule init
+ git submodule update --force ../llama.cpp
Submodule path '../llama.cpp': checked out '952d03dbead16e4dbdd1d3458486340673cc2465'
+ apply_patches
+ grep ollama ../llama.cpp/CMakeLists.txt
+ echo 'add_subdirectory(../ext_server ext_server) # ollama'
++ ls -A ../patches/02-clip-log.diff ../patches/03-load_exception.diff ../patches/04-metal.diff ../patches/05-clip-fix.diff
+ '[' -n '../patches/02-clip-log.diff
../patches/03-load_exception.diff
../patches/04-metal.diff
../patches/05-clip-fix.diff' ']'
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/02-clip-log.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout examples/llava/clip.cpp
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/03-load_exception.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout llama.cpp
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/04-metal.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout ggml-metal.m
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/05-clip-fix.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout examples/llava/clip.cpp
Updated 0 paths from the index
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/02-clip-log.diff
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/03-load_exception.diff
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/04-metal.diff
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/05-clip-fix.diff
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=riscv64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '50;52;61;70;75;80' ']'
+ '[' -z '' -o '' = static ']'
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=riscv64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '50;52;61;70;75;80' ']'
+ CMAKE_TARGETS='--target llama --target ggml'
+ CMAKE_DEFS='-DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ BUILD_DIR=../build/linux/riscv64_static
+ echo 'Building static library'
Building static library
+ build
+ cmake -S ../llama.cpp -B ../build/linux/riscv64_static -DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off
-- The C compiler identification is GNU 13.2.0
-- The CXX compiler identification is GNU 13.2.0
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Check for working C compiler: /usr/bin/cc - skipped
-- Detecting C compile features
-- Detecting C compile features - done
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: /usr/bin/c++ - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Found Git: /usr/bin/git (found version "2.40.1") 
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Success
-- Found Threads: TRUE  
-- Warning: ccache not found - consider installing it for faster compilation or disable this warning with LLAMA_CCACHE=OFF
-- CMAKE_SYSTEM_PROCESSOR: riscv64
-- Unknown architecture
-- Configuring done
-- Generating done
-- Build files have been written to: /home/sipeed/ollama/llm/build/linux/riscv64_static
+ cmake --build ../build/linux/riscv64_static --target llama --target ggml -j8
[ 16%] Building C object CMakeFiles/ggml.dir/ggml-alloc.c.o
[ 33%] Building C object CMakeFiles/ggml.dir/ggml-backend.c.o
[ 50%] Building CXX object CMakeFiles/ggml.dir/sgemm.cpp.o
[ 50%] Building C object CMakeFiles/ggml.dir/ggml.c.o
[ 50%] Building C object CMakeFiles/ggml.dir/ggml-quants.c.o
[ 50%] Built target ggml
[ 83%] Building CXX object CMakeFiles/llama.dir/unicode-data.cpp.o
[ 83%] Building CXX object CMakeFiles/llama.dir/unicode.cpp.o
[ 83%] Building CXX object CMakeFiles/llama.dir/llama.cpp.o
[100%] Linking CXX static library libllama.a
[100%] Built target llama
[100%] Built target ggml
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=riscv64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '50;52;61;70;75;80' ']'
+ '[' -z '' ']'
+ '[' -n '' ']'
+ COMMON_CPU_DEFS='-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off'
+ '[' -z '' -o '' = cpu ']'
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=riscv64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '50;52;61;70;75;80' ']'
+ CMAKE_DEFS='-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ BUILD_DIR=../build/linux/riscv64/cpu
+ echo 'Building LCD CPU'
Building LCD CPU
+ build
+ cmake -S ../llama.cpp -B ../build/linux/riscv64/cpu -DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off
-- The C compiler identification is GNU 13.2.0
-- The CXX compiler identification is GNU 13.2.0
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Check for working C compiler: /usr/bin/cc - skipped
-- Detecting C compile features
-- Detecting C compile features - done
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: /usr/bin/c++ - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Found Git: /usr/bin/git (found version "2.40.1") 
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Success
-- Found Threads: TRUE  
-- Warning: ccache not found - consider installing it for faster compilation or disable this warning with LLAMA_CCACHE=OFF
-- CMAKE_SYSTEM_PROCESSOR: riscv64
-- Unknown architecture
-- Configuring done
-- Generating done
-- Build files have been written to: /home/sipeed/ollama/llm/build/linux/riscv64/cpu
+ cmake --build ../build/linux/riscv64/cpu --target ollama_llama_server -j8
[  6%] Generating build details from Git
[  6%] Building C object CMakeFiles/ggml.dir/ggml.c.o
[  6%] Building C object CMakeFiles/ggml.dir/ggml-quants.c.o
[ 12%] Building C object CMakeFiles/ggml.dir/ggml-alloc.c.o
-- Found Git: /usr/bin/git (found version "2.40.1") 
[ 18%] Building CXX object CMakeFiles/ggml.dir/sgemm.cpp.o
[ 25%] Building C object CMakeFiles/ggml.dir/ggml-backend.c.o
[ 31%] Building CXX object common/CMakeFiles/build_info.dir/build-info.cpp.o
[ 31%] Built target build_info
[ 31%] Built target ggml
[ 43%] Building CXX object CMakeFiles/llama.dir/unicode-data.cpp.o
[ 43%] Building CXX object CMakeFiles/llama.dir/llama.cpp.o
[ 43%] Building CXX object CMakeFiles/llama.dir/unicode.cpp.o
[ 50%] Linking CXX static library libllama.a
[ 50%] Built target llama
[ 56%] Building CXX object examples/llava/CMakeFiles/llava.dir/clip.cpp.o
[ 56%] Building CXX object common/CMakeFiles/common.dir/common.cpp.o
[ 62%] Building CXX object examples/llava/CMakeFiles/llava.dir/llava.cpp.o
[ 62%] Building CXX object common/CMakeFiles/common.dir/grammar-parser.cpp.o
[ 68%] Building CXX object common/CMakeFiles/common.dir/console.cpp.o
[ 75%] Building CXX object common/CMakeFiles/common.dir/json-schema-to-grammar.cpp.o
[ 81%] Building CXX object common/CMakeFiles/common.dir/train.cpp.o
[ 87%] Building CXX object common/CMakeFiles/common.dir/sampling.cpp.o
[ 87%] Building CXX object common/CMakeFiles/common.dir/ngram-cache.cpp.o
[ 93%] Linking CXX static library libcommon.a
[ 93%] Built target common
[ 93%] Built target llava
[ 93%] Building CXX object ext_server/CMakeFiles/ollama_llama_server.dir/server.cpp.o
[100%] Linking CXX executable ../bin/ollama_llama_server
[100%] Built target ollama_llama_server
+ compress
+ echo 'Compressing payloads to reduce overall binary size...'
Compressing payloads to reduce overall binary size...
+ pids=
+ rm -rf '../build/linux/riscv64/cpu/bin/*.gz'
+ for f in ${BUILD_DIR}/bin/*
+ pids+=' 17748'
+ gzip -n --best -f ../build/linux/riscv64/cpu/bin/ollama_llama_server
+ '[' -d ../build/linux/riscv64/cpu/lib ']'
+ echo

+ for pid in ${pids}
+ wait 17748
+ echo 'Finished compression'
Finished compression
+ '[' riscv64 == x86_64 ']'
+ '[' -z '' ']'
+ '[' -d /usr/local/cuda/lib64 ']'
+ '[' -z '' ']'
+ '[' -d /opt/cuda/targets/x86_64-linux/lib ']'
+ '[' -z '' ']'
+ CUDART_LIB_DIR=
+ '[' -d '' ']'
+ '[' -z '' ']'
+ ROCM_PATH=/opt/rocm
+ '[' -z '' ']'
+ '[' -d /usr/lib/cmake/CLBlast ']'
+ '[' -d /opt/rocm ']'
+ cleanup
+ cd ../llama.cpp/
+ git checkout CMakeLists.txt
Updated 1 path from the index
++ ls -A ../patches/02-clip-log.diff ../patches/03-load_exception.diff ../patches/04-metal.diff ../patches/05-clip-fix.diff
+ '[' -n '../patches/02-clip-log.diff
../patches/03-load_exception.diff
../patches/04-metal.diff
../patches/05-clip-fix.diff' ']'
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/02-clip-log.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout examples/llava/clip.cpp
Updated 1 path from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/03-load_exception.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout llama.cpp
Updated 1 path from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/04-metal.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout ggml-metal.m
Updated 1 path from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/05-clip-fix.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout examples/llava/clip.cpp
Updated 0 paths from the index
++ cd ../build/linux/riscv64/cpu/..
++ echo cpu
+ echo 'go generate completed.  LLM runners: cpu'
go generate completed.  LLM runners: cpu

# sipeed @ lpi4a in ~/ollama on git:main o [1:05:33] 
$ go build .       
# github.com/chewxy/math32
../go/pkg/mod/github.com/chewxy/math32@v1.10.1/sqrt.go:3:6: missing function body
../go/pkg/mod/github.com/chewxy/math32@v1.10.1/log.go:76:6: missing function body
../go/pkg/mod/github.com/chewxy/math32@v1.10.1/exp.go:3:6: missing function body
../go/pkg/mod/github.com/chewxy/math32@v1.10.1/exp.go:57:6: missing function body
../go/pkg/mod/github.com/chewxy/math32@v1.10.1/remainder.go:33:6: missing function body

# sipeed @ lpi4a in ~/ollama on git:main o [1:07:32] C:1
$ go get -u github.com/chewxy/math32@7caa3bb
go: downloading github.com/chewxy/math32 v1.10.2-0.20240509203351-7caa3bba2ee1
go: upgraded github.com/chewxy/math32 v1.10.1 => v1.10.2-0.20240509203351-7caa3bba2ee1
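
For reference, that `go get` records the unreleased commit as a pseudo-version in go.mod; a sketch of the resulting require entry (the rest of the file omitted):

```
require github.com/chewxy/math32 v1.10.2-0.20240509203351-7caa3bba2ee1
```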

# sipeed @ lpi4a in ~/ollama on git:main x [1:07:50] 
$ go build .                                
# github.com/ollama/ollama
/home/sipeed/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.22.0.linux-riscv64/pkg/tool/linux_riscv64/link: running gcc failed: exit status 1
/usr/bin/ld: /tmp/go-link-2984308571/000019.o: in function `_cgo_d85a20fb2d9c_Cfunc_llama_model_quantize':
/tmp/go-build/llm.cgo2.c:69: undefined reference to `llama_model_quantize'
/usr/bin/ld: /tmp/go-link-2984308571/000019.o: in function `_cgo_d85a20fb2d9c_Cfunc_llama_model_quantize_default_params':
/tmp/go-build/llm.cgo2.c:86: undefined reference to `llama_model_quantize_default_params'
/usr/bin/ld: /tmp/go-link-2984308571/000019.o: in function `_cgo_d85a20fb2d9c_Cfunc_llama_print_system_info':
/tmp/go-build/llm.cgo2.c:103: undefined reference to `llama_print_system_info'
collect2: error: ld returned 1 exit status

+ pids= + rm -rf '../build/linux/riscv64/cpu/bin/*.gz' + for f in ${BUILD_DIR}/bin/* + pids+=' 17748' + gzip -n --best -f ../build/linux/riscv64/cpu/bin/ollama_llama_server + '[' -d ../build/linux/riscv64/cpu/lib ']' + echo + for pid in ${pids} + wait 17748 + echo 'Finished compression' Finished compression + '[' riscv64 == x86_64 ']' + '[' -z '' ']' + '[' -d /usr/local/cuda/lib64 ']' + '[' -z '' ']' + '[' -d /opt/cuda/targets/x86_64-linux/lib ']' + '[' -z '' ']' + CUDART_LIB_DIR= + '[' -d '' ']' + '[' -z '' ']' + ROCM_PATH=/opt/rocm + '[' -z '' ']' + '[' -d /usr/lib/cmake/CLBlast ']' + '[' -d /opt/rocm ']' + cleanup + cd ../llama.cpp/ + git checkout CMakeLists.txt 从索引区更新了 1 个路径 ++ ls -A ../patches/02-clip-log.diff ../patches/03-load_exception.diff ../patches/04-metal.diff ../patches/05-clip-fix.diff + '[' -n '../patches/02-clip-log.diff ../patches/03-load_exception.diff ../patches/04-metal.diff ../patches/05-clip-fix.diff' ']' + for patch in ../patches/*.diff ++ grep '^+++ ' ../patches/02-clip-log.diff ++ cut -f2 '-d ' ++ cut -f2- -d/ + for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/) + cd ../llama.cpp + git checkout examples/llava/clip.cpp 从索引区更新了 1 个路径 + for patch in ../patches/*.diff ++ grep '^+++ ' ../patches/03-load_exception.diff ++ cut -f2 '-d ' ++ cut -f2- -d/ + for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/) + cd ../llama.cpp + git checkout llama.cpp 从索引区更新了 1 个路径 + for patch in ../patches/*.diff ++ grep '^+++ ' ../patches/04-metal.diff ++ cut -f2 '-d ' ++ cut -f2- -d/ + for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/) + cd ../llama.cpp + git checkout ggml-metal.m 从索引区更新了 1 个路径 + for patch in ../patches/*.diff ++ grep '^+++ ' ../patches/05-clip-fix.diff ++ cut -f2 '-d ' ++ cut -f2- -d/ + for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/) + cd ../llama.cpp + git checkout examples/llava/clip.cpp 从索引区更新了 0 个路径 ++ cd ../build/linux/riscv64/cpu/.. ++ echo cpu + echo 'go generate completed. LLM runners: cpu' go generate completed. LLM runners: cpu # sipeed @ lpi4a in ~/ollama on git:main o [1:05:33] $ go build . # github.com/chewxy/math32 ../go/pkg/mod/github.com/chewxy/math32@v1.10.1/sqrt.go:3:6: missing function body ../go/pkg/mod/github.com/chewxy/math32@v1.10.1/log.go:76:6: missing function body ../go/pkg/mod/github.com/chewxy/math32@v1.10.1/exp.go:3:6: missing function body ../go/pkg/mod/github.com/chewxy/math32@v1.10.1/exp.go:57:6: missing function body ../go/pkg/mod/github.com/chewxy/math32@v1.10.1/remainder.go:33:6: missing function body # sipeed @ lpi4a in ~/ollama on git:main o [1:07:32] C:1 $ go get -u github.com/chewxy/math32@7caa3bb go: downloading github.com/chewxy/math32 v1.10.2-0.20240509203351-7caa3bba2ee1 go: upgraded github.com/chewxy/math32 v1.10.1 => v1.10.2-0.20240509203351-7caa3bba2ee1 # sipeed @ lpi4a in ~/ollama on git:main x [1:07:50] $ go build . 
# github.com/ollama/ollama /home/sipeed/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.22.0.linux-riscv64/pkg/tool/linux_riscv64/link: running gcc failed: exit status 1 /usr/bin/ld: /tmp/go-link-2984308571/000019.o: in function `_cgo_d85a20fb2d9c_Cfunc_llama_model_quantize': /tmp/go-build/llm.cgo2.c:69: undefined reference to `llama_model_quantize' /usr/bin/ld: /tmp/go-link-2984308571/000019.o: in function `_cgo_d85a20fb2d9c_Cfunc_llama_model_quantize_default_params': /tmp/go-build/llm.cgo2.c:86: undefined reference to `llama_model_quantize_default_params' /usr/bin/ld: /tmp/go-link-2984308571/000019.o: in function `_cgo_d85a20fb2d9c_Cfunc_llama_print_system_info': /tmp/go-build/llm.cgo2.c:103: undefined reference to `llama_print_system_info' collect2: 错误:ld 返回 1 ```
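The `missing function body` errors above are the math32 problem this issue asks about: chewxy/math32 v1.10.1 appears to declare functions whose implementations are provided only as architecture-specific assembly, so on riscv64 there is nothing to compile for them. Pinning the follow-up commit, as done in the log, can be reproduced from the module root; a minimal sketch using the pseudo-version resolved above (a later tagged math32 release would serve the same purpose):

```
# Sketch: pin the math32 commit pulled in the log above, then rebuild.
go get github.com/chewxy/math32@7caa3bb
go mod tidy
go build .
```

The `undefined reference` failures that remain after the upgrade appear to be a separate problem with linking the generated llama.cpp payload, not a math32 regression.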

<!-- gh-comment-id:2197251551 --> @Tangweirui2021 commented on GitHub (Jun 28, 2024):

This bug also appears on Loongson CPU:

➜  ollama git:(main) ✗ go generate ./...                                                                                                      
+ set -o pipefail
+ echo 'Starting linux generate script'
Starting linux generate script
+ '[' -z '' ']'
+ '[' -x /usr/local/cuda/bin/nvcc ']'
++ command -v nvcc
+ export CUDACXX=
+ CUDACXX=
+ COMMON_CMAKE_DEFS='-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off'
++ dirname ./gen_linux.sh
+ source ./gen_common.sh
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=loongarch64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '' ']'
+ CMAKE_CUDA_ARCHITECTURES='50;52;61;70;75;80'
+ git_module_setup
+ '[' -n '' ']'
+ '[' -d ../llama.cpp/gguf ']'
+ git submodule init
+ git submodule update --force ../llama.cpp
Submodule path '../llama.cpp': checked out '7c26775adb579e92b59c82e8084c07a1d0f75e9c'
+ apply_patches
+ grep ollama ../llama.cpp/CMakeLists.txt
+ echo 'add_subdirectory(../ext_server ext_server) # ollama'
++ ls -A ../patches/01-load-progress.diff ../patches/02-clip-log.diff ../patches/03-load_exception.diff ../patches/04-metal.diff ../patches/05-default-pretokenizer.diff ../patches/06-qwen2.diff ../patches/07-gemma.diff
+ '[' -n '../patches/01-load-progress.diff
../patches/02-clip-log.diff
../patches/03-load_exception.diff
../patches/04-metal.diff
../patches/05-default-pretokenizer.diff
../patches/06-qwen2.diff
../patches/07-gemma.diff' ']'
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/01-load-progress.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout common/common.cpp
Updated 0 paths from the index
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout common/common.h
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/02-clip-log.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout examples/llava/clip.cpp
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/03-load_exception.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout llama.cpp
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/04-metal.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout ggml-metal.m
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/05-default-pretokenizer.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout llama.cpp
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/06-qwen2.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout llama.cpp
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/07-gemma.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout llama.cpp
Updated 0 paths from the index
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/01-load-progress.diff
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/02-clip-log.diff
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/03-load_exception.diff
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/04-metal.diff
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/05-default-pretokenizer.diff
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/06-qwen2.diff
+ for patch in ../patches/*.diff
+ cd ../llama.cpp
+ git apply ../patches/07-gemma.diff
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=loongarch64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '50;52;61;70;75;80' ']'
+ '[' -z '' -o '' = static ']'
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=loongarch64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '50;52;61;70;75;80' ']'
+ CMAKE_TARGETS='--target llama --target ggml'
+ CMAKE_DEFS='-DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ BUILD_DIR=../build/linux/loongarch64_static
+ echo 'Building static library'
Building static library
+ build
+ cmake -S ../llama.cpp -B ../build/linux/loongarch64_static -DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off
-- Warning: ccache not found - consider installing it for faster compilation or disable this warning with LLAMA_CCACHE=OFF
-- CMAKE_SYSTEM_PROCESSOR: loongarch64
-- loongarch64 detected
-- Configuring done (0.1s)
-- Generating done (0.1s)
-- Build files have been written to: /home/twr/src/ollama/llm/build/linux/loongarch64_static
+ cmake --build ../build/linux/loongarch64_static --target llama --target ggml -j8
[ 60%] Built target ggml
[ 60%] Building CXX object CMakeFiles/llama.dir/llama.cpp.o
[ 80%] Linking CXX static library libllama.a
[100%] Built target llama
[100%] Built target ggml
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=loongarch64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '50;52;61;70;75;80' ']'
+ '[' -z '' ']'
+ '[' -n '' ']'
+ COMMON_CPU_DEFS='-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_OPENMP=off'
+ '[' -z '' -o '' = cpu ']'
+ init_vars
+ case "${GOARCH}" in
++ uname -m
++ sed -e s/aarch64/arm64/g
+ ARCH=loongarch64
+ LLAMACPP_DIR=../llama.cpp
+ CMAKE_DEFS=
+ CMAKE_TARGETS='--target ollama_llama_server'
+ echo ''
+ grep -- -g
+ CMAKE_DEFS='-DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ case $(uname -s) in
++ uname -s
+ LIB_EXT=so
+ WHOLE_ARCHIVE=-Wl,--whole-archive
+ NO_WHOLE_ARCHIVE=-Wl,--no-whole-archive
+ GCC_ARCH=
+ '[' -z '50;52;61;70;75;80' ']'
+ CMAKE_DEFS='-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_OPENMP=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off '
+ BUILD_DIR=../build/linux/loongarch64/cpu
+ echo 'Building LCD CPU'
Building LCD CPU
+ build
+ cmake -S ../llama.cpp -B ../build/linux/loongarch64/cpu -DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_OPENMP=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_BUILD_TYPE=Release -DLLAMA_SERVER_VERBOSE=off
-- Warning: ccache not found - consider installing it for faster compilation or disable this warning with LLAMA_CCACHE=OFF
-- CMAKE_SYSTEM_PROCESSOR: loongarch64
-- loongarch64 detected
-- Configuring done (0.1s)
-- Generating done (0.1s)
-- Build files have been written to: /home/twr/src/ollama/llm/build/linux/loongarch64/cpu
+ cmake --build ../build/linux/loongarch64/cpu --target ollama_llama_server -j8
[  0%] Generating build details from Git
-- Found Git: /usr/bin/git (found version "2.43.0") 
[ 23%] Built target ggml
[ 23%] Building CXX object CMakeFiles/llama.dir/llama.cpp.o
[ 23%] Generating build details from Git
-- Found Git: /usr/bin/git (found version "2.43.0") 
[ 30%] Built target build_info
[ 38%] Linking CXX static library libllama.a
[ 46%] Built target llama
[ 46%] Building CXX object examples/llava/CMakeFiles/llava.dir/clip.cpp.o
[ 53%] Building CXX object examples/llava/CMakeFiles/llava.dir/llava.cpp.o
[ 61%] Building CXX object common/CMakeFiles/common.dir/sampling.cpp.o
[ 61%] Building CXX object common/CMakeFiles/common.dir/common.cpp.o
[ 69%] Building CXX object common/CMakeFiles/common.dir/train.cpp.o
[ 69%] Building CXX object common/CMakeFiles/common.dir/ngram-cache.cpp.o
In file included from /home/twr/src/ollama/llm/llama.cpp/examples/llava/clip.cpp:21:
/home/twr/src/ollama/llm/llama.cpp/examples/llava/../../common/stb_image.h: In function ‘int stbi__parse_png_file(stbi__png*, int, int)’:
/home/twr/src/ollama/llm/llama.cpp/examples/llava/../../common/stb_image.h:5450:31: warning: writing 1 byte into a region of size 0 [-Wstringop-overflow=]
 5450 |                         tc[k] = (stbi_uc)(stbi__get16be(s) & 255) *
      |                         ~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 5451 |                                 stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger
      |                                 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/twr/src/ollama/llm/llama.cpp/examples/llava/../../common/stb_image.h:5326:28: note: at offset 3 into destination object ‘tc’ of size 3
 5326 |     stbi_uc has_trans = 0, tc[3] = {0};
      |                            ^~
[ 76%] Linking CXX static library libcommon.a
[ 92%] Built target common
[ 92%] Built target llava
[ 92%] Building CXX object ext_server/CMakeFiles/ollama_llama_server.dir/server.cpp.o
[100%] Linking CXX executable ../bin/ollama_llama_server
[100%] Built target ollama_llama_server
+ compress
+ echo 'Compressing payloads to reduce overall binary size...'
Compressing payloads to reduce overall binary size...
+ pids=
+ rm -rf ../build/linux/loongarch64/cpu/bin/ollama_llama_server.gz
+ for f in ${BUILD_DIR}/bin/*
+ pids+=' 144504'
+ '[' -d ../build/linux/loongarch64/cpu/lib ']'
+ gzip -n --best -f ../build/linux/loongarch64/cpu/bin/ollama_llama_server
+ echo

+ for pid in ${pids}
+ wait 144504
+ echo 'Finished compression'
Finished compression
+ '[' loongarch64 == x86_64 ']'
+ '[' -z '' ']'
+ '[' -d /usr/local/cuda/lib64 ']'
+ '[' -z '' ']'
+ '[' -d /opt/cuda/targets/x86_64-linux/lib ']'
+ '[' -z '' ']'
+ CUDART_LIB_DIR=
+ '[' -z '' -a -d '' ']'
+ '[' -z '' ']'
+ ONEAPI_ROOT=/opt/intel/oneapi
+ '[' -z '' -a -d /opt/intel/oneapi ']'
+ '[' -z '' ']'
+ ROCM_PATH=/opt/rocm
+ '[' -z '' ']'
+ '[' -d /usr/lib/cmake/CLBlast ']'
+ '[' -z '' -a -d /opt/rocm ']'
+ cleanup
+ cd ../llama.cpp/
+ git checkout CMakeLists.txt
Updated 1 path from the index
++ ls -A ../patches/01-load-progress.diff ../patches/02-clip-log.diff ../patches/03-load_exception.diff ../patches/04-metal.diff ../patches/05-default-pretokenizer.diff ../patches/06-qwen2.diff ../patches/07-gemma.diff
+ '[' -n '../patches/01-load-progress.diff
../patches/02-clip-log.diff
../patches/03-load_exception.diff
../patches/04-metal.diff
../patches/05-default-pretokenizer.diff
../patches/06-qwen2.diff
../patches/07-gemma.diff' ']'
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/01-load-progress.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout common/common.cpp
Updated 1 path from the index
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout common/common.h
Updated 1 path from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/02-clip-log.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout examples/llava/clip.cpp
Updated 1 path from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/03-load_exception.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout llama.cpp
Updated 1 path from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/04-metal.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout ggml-metal.m
Updated 1 path from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/05-default-pretokenizer.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout llama.cpp
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/06-qwen2.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout llama.cpp
Updated 0 paths from the index
+ for patch in ../patches/*.diff
++ grep '^+++ ' ../patches/07-gemma.diff
++ cut -f2 '-d '
++ cut -f2- -d/
+ for file in $(grep "^+++ " ${patch} | cut -f2 -d' ' | cut -f2- -d/)
+ cd ../llama.cpp
+ git checkout llama.cpp
Updated 0 paths from the index
++ cd ../build/linux/loongarch64/cpu/..
++ echo cpu
+ echo 'go generate completed.  LLM runners: cpu'
go generate completed.  LLM runners: cpu
➜  ollama git:(main) ✗ go build .                                                                                                             
# github.com/ollama/ollama
/home/twr/src/go/pkg/tool/linux_loong64/link: running gcc failed: exit status 1
/usr/bin/ld: /tmp/go-link-1668899803/000020.o: in function `_cgo_d85a20fb2d9c_Cfunc_llama_model_quantize':
/tmp/go-build/llm.cgo2.c:66:(.text+0x34): undefined reference to `llama_model_quantize'
/usr/bin/ld: /tmp/go-link-1668899803/000020.o: in function `_cgo_d85a20fb2d9c_Cfunc_llama_model_quantize_default_params':
/tmp/go-build/llm.cgo2.c:83:(.text+0x9c): undefined reference to `llama_model_quantize_default_params'
/usr/bin/ld: /tmp/go-link-1668899803/000020.o: in function `_cgo_d85a20fb2d9c_Cfunc_llama_print_system_info':
/tmp/go-build/llm.cgo2.c:100:(.text+0x120): undefined reference to `llama_print_system_info'
collect2: error: ld returned 1
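One way to narrow down these `undefined reference` errors (a diagnostic sketch, not a fix from this thread) is to check whether the static library produced by `go generate` actually defines the symbols the cgo bindings reference; the path below is taken from the loongarch64 log above, and the riscv64 build uses the matching directory:

```
# Sketch: list the defined, external symbols in the freshly built static
# library and look for the ones the linker reported as missing.
nm -g --defined-only llm/build/linux/loongarch64_static/libllama.a \
  | grep -E 'llama_model_quantize|llama_print_system_info'
```

If the symbols are present, the failure more likely lies in how the Go link step locates the library than in the library itself.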
Reference: github-starred/ollama#64720