mirror of
https://github.com/open-webui/open-webui.git
synced 2026-04-30 01:10:17 -05:00
refac
This commit is contained in:
@@ -57,6 +57,7 @@ from open_webui.utils.misc import (
|
|||||||
|
|
||||||
from open_webui.utils.auth import get_admin_user, get_verified_user
|
from open_webui.utils.auth import get_admin_user, get_verified_user
|
||||||
from open_webui.utils.headers import include_user_info_headers
|
from open_webui.utils.headers import include_user_info_headers
|
||||||
|
from open_webui.utils.anthropic import is_anthropic_url, get_anthropic_models
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -68,6 +69,8 @@ log = logging.getLogger(__name__)
|
|||||||
##########################################
|
##########################################
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
async def send_get_request(url, key=None, user: UserModel = None):
|
async def send_get_request(url, key=None, user: UserModel = None):
|
||||||
timeout = aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST)
|
timeout = aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST)
|
||||||
try:
|
try:
|
||||||
@@ -91,6 +94,12 @@ async def send_get_request(url, key=None, user: UserModel = None):
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
async def get_models_request(url, key=None, user: UserModel = None):
    """Fetch the model list for *url*.

    Anthropic endpoints are routed to the Anthropic-specific fetcher
    (which paginates and normalizes the response); every other URL gets
    the generic OpenAI-style ``GET {url}/models`` request.
    """
    if not is_anthropic_url(url):
        return await send_get_request(f"{url}/models", key, user=user)
    return await get_anthropic_models(url, key, user=user)
|
||||||
|
|
||||||
|
|
||||||
def openai_reasoning_model_handler(payload):
|
def openai_reasoning_model_handler(payload):
|
||||||
"""
|
"""
|
||||||
Handle reasoning model specific parameters
|
Handle reasoning model specific parameters
|
||||||
@@ -366,11 +375,7 @@ async def get_all_models_responses(request: Request, user: UserModel) -> list:
|
|||||||
for idx, url in enumerate(api_base_urls):
|
for idx, url in enumerate(api_base_urls):
|
||||||
if (str(idx) not in api_configs) and (url not in api_configs): # Legacy support
|
if (str(idx) not in api_configs) and (url not in api_configs): # Legacy support
|
||||||
request_tasks.append(
|
request_tasks.append(
|
||||||
send_get_request(
|
get_models_request(url, api_keys[idx], user=user)
|
||||||
f"{url}/models",
|
|
||||||
api_keys[idx],
|
|
||||||
user=user,
|
|
||||||
)
|
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
api_config = api_configs.get(
|
api_config = api_configs.get(
|
||||||
@@ -384,11 +389,7 @@ async def get_all_models_responses(request: Request, user: UserModel) -> list:
|
|||||||
if enable:
|
if enable:
|
||||||
if len(model_ids) == 0:
|
if len(model_ids) == 0:
|
||||||
request_tasks.append(
|
request_tasks.append(
|
||||||
send_get_request(
|
get_models_request(url, api_keys[idx], user=user)
|
||||||
f"{url}/models",
|
|
||||||
api_keys[idx],
|
|
||||||
user=user,
|
|
||||||
)
|
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
model_list = {
|
model_list = {
|
||||||
@@ -594,6 +595,10 @@ async def get_models(
|
|||||||
"data": api_config.get("model_ids", []) or [],
|
"data": api_config.get("model_ids", []) or [],
|
||||||
"object": "list",
|
"object": "list",
|
||||||
}
|
}
|
||||||
|
elif is_anthropic_url(url):
|
||||||
|
models = await get_anthropic_models(url, key, user=user)
|
||||||
|
if models is None:
|
||||||
|
raise Exception("Failed to connect to Anthropic API")
|
||||||
else:
|
else:
|
||||||
async with session.get(
|
async with session.get(
|
||||||
f"{url}/models",
|
f"{url}/models",
|
||||||
@@ -602,7 +607,6 @@ async def get_models(
|
|||||||
ssl=AIOHTTP_CLIENT_SESSION_SSL,
|
ssl=AIOHTTP_CLIENT_SESSION_SSL,
|
||||||
) as r:
|
) as r:
|
||||||
if r.status != 200:
|
if r.status != 200:
|
||||||
# Extract response error details if available
|
|
||||||
error_detail = f"HTTP Error: {r.status}"
|
error_detail = f"HTTP Error: {r.status}"
|
||||||
try:
|
try:
|
||||||
res = await r.json()
|
res = await r.json()
|
||||||
@@ -614,9 +618,7 @@ async def get_models(
|
|||||||
|
|
||||||
response_data = await r.json()
|
response_data = await r.json()
|
||||||
|
|
||||||
# Check if we're calling OpenAI API based on the URL
|
|
||||||
if "api.openai.com" in url:
|
if "api.openai.com" in url:
|
||||||
# Filter models according to the specified conditions
|
|
||||||
response_data["data"] = [
|
response_data["data"] = [
|
||||||
model
|
model
|
||||||
for model in response_data.get("data", [])
|
for model in response_data.get("data", [])
|
||||||
@@ -707,6 +709,17 @@ async def verify_connection(
|
|||||||
)
|
)
|
||||||
|
|
||||||
return response_data
|
return response_data
|
||||||
|
elif is_anthropic_url(url):
|
||||||
|
result = await get_anthropic_models(url, key)
|
||||||
|
if result is None:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=500, detail="Failed to connect to Anthropic API"
|
||||||
|
)
|
||||||
|
if "error" in result:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=500, detail=result["error"]
|
||||||
|
)
|
||||||
|
return result
|
||||||
else:
|
else:
|
||||||
async with session.get(
|
async with session.get(
|
||||||
f"{url}/models",
|
f"{url}/models",
|
||||||
|
|||||||
80
backend/open_webui/utils/anthropic.py
Normal file
80
backend/open_webui/utils/anthropic.py
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
import logging
|
||||||
|
|
||||||
|
import aiohttp
|
||||||
|
|
||||||
|
from open_webui.env import (
|
||||||
|
AIOHTTP_CLIENT_SESSION_SSL,
|
||||||
|
AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST,
|
||||||
|
ENABLE_FORWARD_USER_INFO_HEADERS,
|
||||||
|
)
|
||||||
|
from open_webui.models.users import UserModel
|
||||||
|
from open_webui.utils.headers import include_user_info_headers
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def is_anthropic_url(url: str) -> bool:
    """Return True when *url* targets the Anthropic API endpoint."""
    anthropic_host = "api.anthropic.com"
    return anthropic_host in url
|
||||||
|
|
||||||
|
|
||||||
|
async def get_anthropic_models(url: str, key: str, user: UserModel = None) -> dict:
    """
    Fetch models from Anthropic's ``{url}/models`` endpoint, following
    pagination, and normalize the result to the OpenAI list format.

    Args:
        url: Base URL of the Anthropic API (e.g. ``https://api.anthropic.com/v1``).
        key: Anthropic API key, sent as the ``x-api-key`` header.
        user: Optional user whose info headers are forwarded when
            ``ENABLE_FORWARD_USER_INFO_HEADERS`` is enabled.

    Returns:
        ``{"object": "list", "data": [...]}`` on success; the same shape plus
        an ``"error"`` key on a non-200 response; ``None`` when the connection
        itself fails (callers treat ``None`` as "failed to connect").
    """
    timeout = aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_MODEL_LIST)
    all_models = []
    after_id = None

    try:
        async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
            headers = {
                "x-api-key": key,
                # Anthropic requires an explicit API version header.
                "anthropic-version": "2023-06-01",
            }
            if ENABLE_FORWARD_USER_INFO_HEADERS and user:
                headers = include_user_info_headers(headers, user)

            while True:
                # 1000 is the documented maximum page size for this endpoint.
                params = {"limit": 1000}
                if after_id:
                    params["after_id"] = after_id

                async with session.get(
                    f"{url}/models",
                    headers=headers,
                    params=params,
                    ssl=AIOHTTP_CLIENT_SESSION_SSL,
                ) as response:
                    if response.status != 200:
                        # Surface upstream error details when the body is JSON.
                        error_detail = f"HTTP Error: {response.status}"
                        try:
                            res = await response.json()
                            if "error" in res:
                                error_detail = f"External Error: {res['error']}"
                        except Exception:
                            pass
                        return {"object": "list", "data": [], "error": error_detail}

                    data = await response.json()

                    # Normalize each Anthropic model entry to the OpenAI shape.
                    for model in data.get("data", []):
                        all_models.append(
                            {
                                "id": model.get("id"),
                                "object": "model",
                                "created": 0,
                                "owned_by": "anthropic",
                                "name": model.get("display_name", model.get("id")),
                            }
                        )

                    if not data.get("has_more", False):
                        break
                    after_id = data.get("last_id")
                    # Defensive fix: a truthy has_more with a missing/None
                    # last_id would otherwise re-request the same page forever.
                    if not after_id:
                        break

    except Exception as e:
        log.error(f"Anthropic connection error: {e}")
        return None

    return {"object": "list", "data": all_models}
|
||||||
Reference in New Issue
Block a user