mirror of
https://github.com/open-webui/open-webui.git
synced 2026-05-07 11:28:35 -05:00
[GH-ISSUE #16229] issue: openai api error, when image_url in content #33361
Reference in New Issue
Block a user
Delete Branch "%!s()"
Deleting a branch is permanent. Although the deleted branch may continue to exist for a short time before it actually gets removed, it CANNOT be undone in most cases. Continue?
Originally created by @Nehc on GitHub (Aug 2, 2025).
Original GitHub issue: https://github.com/open-webui/open-webui/issues/16229
Check Existing Issues
Installation Method
Docker
Open WebUI Version
v0.6.18
Ollama Version (if applicable)
No response
Operating System
ubuntu
Browser (if applicable)
No response
Confirmation
Expected Behavior
I expect that both endpoints will work identically, but for some reason
/ollama/v1 works, but /api does not.
Actual Behavior
/api causes an error.
Steps to Reproduce
Logs & Screenshots
BadRequestError Traceback (most recent call last)
Cell In[74], line 11
8 with open("test.png", "rb") as image_file:
9 encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
---> 11 response = client.chat.completions.create(
12 model="qwen2.5vl:latest",
13 messages=[
14 {
15 "role": "user",
16 "content":[
17 {"type": "text", "text": "What's in the picture?"},
18 {
19 "type": "image_url",
20 "image_url": f"data:image/png;base64,{encoded_string}"
21 },
22 ],
23 },
24 ],
25 )
26 print(response.choices[0].message.content)
File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.12_qbz5n2kfra8p0\LocalCache\local-packages\Python312\site-packages\openai_utils_utils.py:274, in required_args..inner..wrapper(*args, **kwargs)
272 msg = f"Missing required argument: {quote(missing[0])}"
273 raise TypeError(msg)
--> 274 return func(*args, **kwargs)
File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.12_qbz5n2kfra8p0\LocalCache\local-packages\Python312\site-packages\openai\resources\chat\completions.py:742, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, n, parallel_tool_calls, presence_penalty, response_format, seed, service_tier, stop, store, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
704 @required_args(["messages", "model"], ["messages", "model", "stream"])
705 def create(
706 self,
(...)
739 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
740 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
741 validate_response_format(response_format)
--> 742 return self._post(
743 "/chat/completions",
744 body=maybe_transform(
745 {
746 "messages": messages,
747 "model": model,
748 "frequency_penalty": frequency_penalty,
749 "function_call": function_call,
750 "functions": functions,
751 "logit_bias": logit_bias,
752 "logprobs": logprobs,
753 "max_completion_tokens": max_completion_tokens,
754 "max_tokens": max_tokens,
755 "metadata": metadata,
756 "n": n,
757 "parallel_tool_calls": parallel_tool_calls,
758 "presence_penalty": presence_penalty,
759 "response_format": response_format,
760 "seed": seed,
761 "service_tier": service_tier,
762 "stop": stop,
763 "store": store,
764 "stream": stream,
765 "stream_options": stream_options,
766 "temperature": temperature,
767 "tool_choice": tool_choice,
768 "tools": tools,
769 "top_logprobs": top_logprobs,
770 "top_p": top_p,
771 "user": user,
772 },
773 completion_create_params.CompletionCreateParams,
774 ),
775 options=make_request_options(
776 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
777 ),
778 cast_to=ChatCompletion,
779 stream=stream or False,
780 stream_cls=Stream[ChatCompletionChunk],
781 )
File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.12_qbz5n2kfra8p0\LocalCache\local-packages\Python312\site-packages\openai_base_client.py:1277, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
1263 def post(
1264 self,
1265 path: str,
(...)
1272 stream_cls: type[_StreamT] | None = None,
1273 ) -> ResponseT | _StreamT:
1274 opts = FinalRequestOptions.construct(
1275 method="post", url=path, json_data=body, files=to_httpx_files(files), **options
1276 )
-> 1277 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.12_qbz5n2kfra8p0\LocalCache\local-packages\Python312\site-packages\openai_base_client.py:954, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
951 else:
952 retries_taken = 0
--> 954 return self._request(
955 cast_to=cast_to,
956 options=options,
957 stream=stream,
958 stream_cls=stream_cls,
959 retries_taken=retries_taken,
960 )
File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.12_qbz5n2kfra8p0\LocalCache\local-packages\Python312\site-packages\openai_base_client.py:1058, in SyncAPIClient._request(self, cast_to, options, retries_taken, stream, stream_cls)
1055 err.response.read()
1057 log.debug("Re-raising status error")
-> 1058 raise self._make_status_error_from_response(err.response) from None
1060 return self._process_response(
1061 cast_to=cast_to,
1062 options=options,
(...)
1066 retries_taken=retries_taken,
1067 )
BadRequestError: Error code: 400 - {'detail': "'str' object has no attribute 'get'"}
@rgaricano commented on GitHub (Aug 3, 2025):
doesn't work because endpoint is /api/v1
@Nehc commented on GitHub (Aug 3, 2025):
No, it's not. This is how this API is described in the documentation.
ollama api - yes, openai compatible api requires v1
@tjbck commented on GitHub (Aug 4, 2025):
@Nehc
You've supplied an incorrect payload format for the endpoint.
@Nehc commented on GitHub (Sep 8, 2025):
I didn't understand this answer: You've supplied an incorrect payload format for the endpoint.
I use the two endpoints exactly the same way, assuming they are BOTH OpenAI-compatible. Is there any difference? Is one of them deprecated? Which one?
client = OpenAI(api_key="<my_api_key>", base_url="http://localhost:3000/ollama/v1") # worked!
client = OpenAI(api_key="<my_api_key>", base_url="http://localhost:3000/api/") # does not work...
in other aspects (just chat, for example, without a picture) these endpoints work the same.